diff --git a/.gitignore b/.gitignore index 7867032e438..8f28dbc7164 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ .project .settings .tags +.vscode tags *,cover @@ -17,8 +18,8 @@ tags # an accidental add. perf_insights/perf_insights/download_traces.py -# devil's binary dependency download folder. -/devil/bin/deps/ +# test coverage data +.coverage # telemetry's binary dependency download folders. /telemetry/telemetry/internal/bin/ diff --git a/.vpython b/.vpython index 56cc3a986dc..2a3172b783a 100644 --- a/.vpython +++ b/.vpython @@ -45,7 +45,7 @@ wheel: < > wheel: < name: "infra/python/wheels/werkzeug-py2_py3" - version: "version:0.15.2" + version: "version:1.0.1" > wheel: < name: "infra/python/wheels/click-py2_py3" @@ -183,3 +183,10 @@ wheel: < platform: "win_amd64" > > + +# Used by: +# telemetry/telemetry/internal/backends/chrome/remote_cast_browser_backend.py +wheel: < + name: "infra/python/wheels/pexpect/${vpython_platform}" + version: "version:4.8.0.chromium.1" +> diff --git a/.vpython3 b/.vpython3 index 605efa264b7..ff54289b3bc 100644 --- a/.vpython3 +++ b/.vpython3 @@ -31,18 +31,255 @@ python_version: "3.8" # //telemetry/telemetry/internal/util/external_modules.py wheel: < name: "infra/python/wheels/numpy/${vpython_platform}" - version: "version:1.20.3" - # A newer version of numpy is required on ARM64, but it breaks older OS versions. + version: "version:1.2x.supported.1" +> +wheel: < + name: "infra/python/wheels/opencv_python/${vpython_platform}" + version: "version:4.5.3.56.chromium.4" + # There is currently no Linux arm/arm64 version in CIPD. not_match_tag < - platform: "macosx_11_0_arm64" + platform: "linux_aarch64" > > + +# Used by: +# vpython3 bin/run_py_test +# This is used in pre-submit try jobs, which used to rely on gae-sdk from cipd, +# and in post-submit cloud builds, which used to rely on google/cloud-sdk +# docker image. Both sources are out of date and do not support python 3. 
wheel: < - name: "infra/python/wheels/numpy/mac-arm64_cp38_cp38" - version: "version:1.21.1" - match_tag < - platform: "macosx_11_0_arm64" - > + name: "infra/python/wheels/appengine-python-standard-py3" + version: "version:0.3.1" +> + +wheel: < + name: "infra/python/wheels/frozendict-py3" + version: "version:2.0.6" +> + +wheel: < + name: "infra/python/wheels/google-auth-py2_py3" + version: "version:1.35.0" +> + +wheel: < + name: "infra/python/wheels/pytz-py2_py3" + version: "version:2021.1" +> + +wheel: < + name: "infra/python/wheels/mock-py3" + version: "version:4.0.3" +> + +wheel: < + name: "infra/python/wheels/ruamel_yaml-py3" + version: "version:0.17.16" +> + +wheel: < + name: "infra/python/wheels/pyasn1_modules-py2_py3" + version: "version:0.2.8" +> + +wheel: < + name: "infra/python/wheels/rsa-py3" + version: "version:4.7.2" +> + +wheel: < + name: "infra/python/wheels/cachetools-py3" + version: "version:4.2.2" +> + +wheel: < + name: "infra/python/wheels/pyasn1-py2_py3" + version: "version:0.4.8" +> + +wheel: < + name: "infra/python/wheels/charset_normalizer-py3" + version: "version:2.0.4" +> + +wheel: < + name: "infra/python/wheels/ruamel_yaml_clib/${vpython_platform}" + version: "version:0.2.6" +> + +wheel: < + name: "infra/python/wheels/httplib2-py3" + version: "version:0.19.1" +> + +wheel: < + name: "infra/python/wheels/pyparsing-py2_py3" + version: "version:2.4.7" +> + +wheel: < + name: "infra/python/wheels/google-api-python-client-py3" + version: "version:2.2.0" +> + +wheel: < + name: "infra/python/wheels/google-auth-httplib2-py2_py3" + version: "version:0.1.0" +> + +wheel: < + name: "infra/python/wheels/google-api-core-py3" + version: "version:1.31.5" +> + +wheel: < + name: "infra/python/wheels/googleapis-common-protos-py2_py3" + version: "version:1.52.0" +> + +wheel: < + name: "infra/python/wheels/uritemplate-py2_py3" + version: "version:3.0.0" +> + +wheel: < + name: "infra/python/wheels/webtest-py2_py3" + version: "version:2.0.35" +> + +wheel: < + name: "infra/python/wheels/webob-py2_py3" + version: "version:1.8.6" +> + +wheel: < + name: "infra/python/wheels/waitress-py2_py3" + version: "version:1.4.3" +> + +wheel: < + name: "infra/python/wheels/beautifulsoup4-py3" + version: "version:4.9.0" +> + +wheel: < + name: "infra/python/wheels/soupsieve-py2_py3" + version: "version:1.9.5" +> + +wheel: < + name: "infra/python/wheels/jinja2-py2_py3" + version: "version:2.10.1" +> + +wheel: < + name: "infra/python/wheels/markupsafe/${vpython_platform}" + version: "version:1.1.1" +> + +wheel: < + name: "infra/python/wheels/infra_libs-py2_py3" + version: "version:2.3.0" +> + +wheel: < + name: "infra/python/wheels/oauth2client-py2_py3" + version: "version:3.0.0" +> + +wheel: < + name: "infra/python/wheels/google-cloud-logging-py3" + version: "version:3.0.0" +> + +wheel: < + name: "infra/python/wheels/google-cloud-core-py3" + version: "version:2.2.2" +> + +wheel: < + name: "infra/python/wheels/google-cloud-audit-log-py2_py3" + version: "version:0.2.0" +> + +wheel: < + name: "infra/python/wheels/ijson/${vpython_platform}" + version: "version:3.2.3" +> + +wheel: < + name: "infra/python/wheels/google-cloud-datastore-py3" + version: "version:2.1.6" +> + +wheel: < + name: "infra/python/wheels/typing-extensions-py3" + version: "version:3.7.4.3" +> + +wheel: < + name: "infra/python/wheels/typing-inspect-py3" + version: "version:0.7.1" +> + +wheel: < + name: "infra/python/wheels/libcst-py3" + version: "version:0.3.19" +> + +wheel: < + name: "infra/python/wheels/pyyaml-py3" + version: "version:5.3.1" 
+> + +wheel: < + name: "infra/python/wheels/mypy-extensions-py3" + version: "version:0.4.3" +> + +wheel: < + name: "infra/python/wheels/flask-talisman-py2_py3" + version: "version:0.7.0" +> + +wheel: < + name: "infra/python/wheels/grpc-google-iam-v1-py3" + version: "version:0.12.3" +> + +wheel: < + name: "infra/python/wheels/proto-plus-py3" + version: "version:1.20.3" +> + +wheel: < + name: "infra/python/wheels/google-cloud-appengine-logging-py2_py3" + version: "version:1.1.1" +> + +wheel: < + name: "infra/python/wheels/grpcio/${vpython_platform}" + version: "version:1.44.0" +> + +wheel: < + name: "infra/python/wheels/flask-py2_py3" + version: "version:1.0.2" +> + +wheel: < + name: "infra/python/wheels/werkzeug-py2_py3" + version: "version:1.0.1" +> + +wheel: < + name: "infra/python/wheels/itsdangerous-py2_py3" + version: "version:1.1.0" +> + +wheel: < + name: "infra/python/wheels/click-py2_py3" + version: "version:7.0" > # Used by: @@ -60,10 +297,16 @@ wheel: < # Used by: # build/android/pylib/local/emulator/avd.py -# components/policy/test_support/policy_testserver.py wheel: < - name: "infra/python/wheels/protobuf-py2_py3" - version: "version:3.6.1" + name: "infra/python/wheels/protobuf-py3" + version: "version:4.21.9" +> + +# Used by: +# //third_party/catapult/telemetry/telemetry/internal/backends/chrome/remote_cast_browser_backend.py +wheel: < + name: "infra/python/wheels/pexpect/${vpython_platform}" + version: "version:4.8.0.chromium.1" > # TODO(https://crbug.com/898348): Add in necessary wheels as Python3 versions @@ -73,6 +316,13 @@ wheel: < version: "version:1.15.0" > +# Used by code coverage reporting tools in: +# //third_party/catapult/third_party/coverage/coverage/ +wheel: < + name: "infra/python/wheels/coverage/${vpython_platform}" + version: "version:5.5.chromium.3" +> + # Common utilities. # For Python 2, this version needed to be compatible with the version range # specified by @@ -81,18 +331,13 @@ wheel: < # use the newer version that's currently available in CIPD for now. wheel: < name: "infra/python/wheels/psutil/${vpython_platform}" - version: "version:5.8.0.chromium.2" + version: "version:5.8.0.chromium.3" > wheel: < - name: "infra/python/wheels/requests-py2_py3" - version: "version:2.13.0" + name: "infra/python/wheels/requests-py3" + version: "version:2.31.0" > -# Used by various python unit tests. -wheel: < - name: "infra/python/wheels/mock-py2_py3" - version: "version:2.0.0" -> wheel: < name: "infra/python/wheels/parameterized-py2_py3" version: "version:0.7.1" @@ -142,7 +387,7 @@ wheel: < > wheel: < name: "infra/python/wheels/urllib3-py2_py3" - version: "version:1.24.3" + version: "version:1.26.6" > wheel: < name: "infra/python/wheels/blessings-py2_py3" @@ -193,11 +438,6 @@ wheel: < version: "version:16.8" > -wheel: < - name: "infra/python/wheels/pyparsing-py2_py3" - version: "version:2.2.0" -> - wheel: < name: "infra/python/wheels/toml-py3" version: "version:0.10.1" @@ -215,12 +455,7 @@ wheel < wheel < name: "infra/python/wheels/attrs-py2_py3" - version: "version:20.3.0" -> - -wheel < - name: "infra/python/wheels/six-py2_py3" - version: "version:1.15.0" + version: "version:21.4.0" > wheel < @@ -252,3 +487,8 @@ wheel: < name: "infra/python/wheels/colorama-py2_py3" version: "version:0.4.1" > + +wheel: < + name: "infra/python/wheels/google-cloud-monitoring-py2_py3" + version: "version:2.9.1" +> diff --git a/.yapfignore b/.yapfignore new file mode 100644 index 00000000000..32cd76bc07b --- /dev/null +++ b/.yapfignore @@ -0,0 +1,3 @@ +# Generated protobuf code. 
+*_pb2.py +*_pb2_grpc.py diff --git a/AUTHORS b/AUTHORS index 89377ead490..21e6d09c6b5 100644 --- a/AUTHORS +++ b/AUTHORS @@ -9,6 +9,8 @@ # See python fnmatch module documentation for more information. Anton Zub +Ho Cheung +Jincheol Jo Kris Selden Maciek Weksej Magda Zawora diff --git a/BUILD.gn b/BUILD.gn index 04e01dc63fc..fb61e0e019c 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -29,10 +29,11 @@ group("telemetry_chrome_test_support") { "catapult_build/", "dashboard/", "dependency_manager/", - "firefighter/", "hooks/", "infra/", "netlog_viewer/", + "perf_issue_service/", + "skia_bridge/", "systrace/", "trace_processor/", "web_page_replay_go/", @@ -60,10 +61,12 @@ group("telemetry_chrome_test_support") { "third_party/certifi/", "third_party/chai/", "third_party/chardet/", + "third_party/click/", "third_party/cloudstorage/", "third_party/coverage/", "third_party/d3/", "third_party/depot_tools/", + "third_party/flask/", "third_party/flot/", "third_party/gae_ts_mon/", "third_party/google-auth/", @@ -74,9 +77,12 @@ group("telemetry_chrome_test_support") { "third_party/idb/", "third_party/idna/", "third_party/ijson/", + "third_party/itsdangerous/", + "third_party/jinja2/", "third_party/jquery/", "third_party/jszip/", "third_party/mapreduce/", + "third_party/markupsafe/", "third_party/mocha/", "third_party/mock/", "third_party/mox3/", @@ -89,7 +95,6 @@ group("telemetry_chrome_test_support") { "third_party/pyasn1_modules/", "third_party/pyfakefs/", "third_party/pyparsing/", - "third_party/pyserial/", "third_party/python_gflags/", "third_party/redux/", "third_party/requests/", @@ -97,13 +102,13 @@ group("telemetry_chrome_test_support") { "third_party/rsa/", "third_party/six/", "third_party/snap-it/", - "third_party/tsmon_client/", "third_party/tsproxy/", "third_party/uritemplate/", "third_party/urllib3/", "third_party/webapp2/", "third_party/webencodings-0.5.1/", "third_party/webtest/", + "third_party/werkzeug/", ] data_deps += [ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fc690d39638..c4685d66f32 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ # Code of Conduct We follow the [Chromium code of conduct]( -https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md) in +https://chromium.googlesource.com/chromium/src/+/main/CODE_OF_CONDUCT.md) in our our repos and organizations, mailing lists, and other communications. # Issues @@ -34,7 +34,7 @@ You can then create a local branch, make and commit your change. ``` cd catapult -git checkout -t -b foo origin/master +git checkout -t -b foo origin/main ... edit files ... git commit -a -m "New files" ``` @@ -57,7 +57,7 @@ Then, submit your changes through the commit queue by checking the "Commit" box. Once everything is landed, you can cleanup your branch. ``` -git checkout master +git checkout main git branch -D foo ``` diff --git a/OWNERS b/OWNERS index d8ecbdffbc3..d804b055a8e 100644 --- a/OWNERS +++ b/OWNERS @@ -1,8 +1,6 @@ -abennetts@google.com bsheedy@chromium.org dproy@chromium.org -fmmirzaei@google.com -heiserya@google.com johnchen@chromium.org seanmccullough@google.com wenbinzhang@google.com +maxqli@google.com diff --git a/PRESUBMIT.py b/PRESUBMIT.py index 70bd88b87c1..0c766750c03 100644 --- a/PRESUBMIT.py +++ b/PRESUBMIT.py @@ -7,9 +7,12 @@ See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into depot_tools. 
""" + import re import sys +USE_PYTHON3 = True + _EXCLUDED_PATHS = ( r'(.*[\\/])?\.git[\\/].*', r'.+\.png$', @@ -28,13 +31,15 @@ r'^dashboard[\\/]dashboard[\\/]api[\\/]examples[\\/].*.js', r'^dashboard[\\/]dashboard[\\/]templates[\\/].*', r'^dashboard[\\/]dashboard[\\/]sheriff_config[\\/].*_pb2.py$', + r'^dashboard[\\/]sandwich_verification[\\/]cabe/proto/v1[\\/].*_pb2.py$', + r'^dashboard[\\/]sandwich_verification[\\/]cabe/proto/v1[\\/].*_pb2_grpc.py$', # pylint: disable=line-too-long r'^experimental[\\/]heatmap[\\/].*', r'^experimental[\\/]trace_on_tap[\\/]third_party[\\/].*', r'^experimental[\\/]perf_sheriffing_emailer[\\/].*.js', r'^perf_insights[\\/]test_data[\\/].*', r'^perf_insights[\\/]third_party[\\/].*', r'^telemetry[\\/]third_party[\\/].*', - r'^third_party[\\/].*', + r'.*third_party[\\/].*', r'^tracing[\\/]\.allow-devtools-save$', r'^tracing[\\/]bower\.json$', r'^tracing[\\/]\.bowerrc$', @@ -47,16 +52,17 @@ _GITHUB_BUG_ID_RE = re.compile(r'#[1-9]\d*') -_MONORAIL_BUG_ID_RE = re.compile(r'[1-9]\d*') -_MONORAIL_PROJECT_NAMES = frozenset({'chromium', 'v8', 'angleproject', 'skia'}) +_NUMERAL_BUG_ID_RE = re.compile(r'[1-9]\d*') +_MONORAIL_PROJECT_NAMES = frozenset( + {'chromium', 'v8', 'angleproject', 'skia', 'dawn'}) def CheckChangeLogBug(input_api, output_api): # Show a presubmit message if there is no Bug line or an empty Bug line. if not input_api.change.BugsFromDescription(): return [output_api.PresubmitNotifyResult( - 'If this change has associated bugs on GitHub or Monorail, add a ' - '"Bug: (, )*" line to the patch description where can ' - 'be one of the following: catapult:#NNNN, ' + + 'If this change has associated bugs on GitHub, Issuetracker or ' + 'Monorail, add a "Bug: (, )*" line to the patch description ' + 'where can be one of the following: catapult:#NNNN, b:NNNNNN, ' + ', '.join('%s:NNNNNN' % n for n in _MONORAIL_PROJECT_NAMES) + '.')] # Check that each bug in the BUG= line has the correct format. @@ -79,8 +85,13 @@ def CheckChangeLogBug(input_api, output_api): 'repository should be provided in the ' '"catapult:#NNNN" format.' % bug) catapult_bug_provided = True + elif project_name == 'b': + if not _NUMERAL_BUG_ID_RE.match(bug_id): + error_messages.append('Invalid bug "%s". Bugs in the Issuetracker ' + 'should be provided in the ' + '"b:NNNNNN" format.' % bug) elif project_name in _MONORAIL_PROJECT_NAMES: - if not _MONORAIL_BUG_ID_RE.match(bug_id): + if not _NUMERAL_BUG_ID_RE.match(bug_id): error_messages.append('Invalid bug "%s". Bugs in the Monorail %s ' 'project should be provided in the ' '"%s:NNNNNN" format.' % (bug, project_name, @@ -126,7 +137,7 @@ def CheckChangeOnUpload(input_api, output_api): results = CheckChange(input_api, output_api) cwd = input_api.PresubmitLocalPath() exit_code = input_api.subprocess.call( - [input_api.python_executable, 'generate_telemetry_build.py', '--check'], + [input_api.python3_executable, 'generate_telemetry_build.py', '--check'], cwd=cwd) if exit_code != 0: results.append(output_api.PresubmitError( diff --git a/bin/run_dev_server b/bin/run_dev_server index b1cb6c0a305..3d5c25776ac 100755 --- a/bin/run_dev_server +++ b/bin/run_dev_server @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/bin/run_tests b/bin/run_tests index 29e5fccc363..0247e169ffa 100755 --- a/bin/run_tests +++ b/bin/run_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/catapult_build/PRESUBMIT.py b/catapult_build/PRESUBMIT.py index 93625db85d3..507373a9cce 100644 --- a/catapult_build/PRESUBMIT.py +++ b/catapult_build/PRESUBMIT.py @@ -3,6 +3,9 @@ # found in the LICENSE file. +USE_PYTHON3 = True + + def CheckChangeOnUpload(input_api, output_api): return _CommonChecks(input_api, output_api) @@ -15,7 +18,7 @@ def _CommonChecks(input_api, output_api): results = [] results += input_api.RunTests(input_api.canned_checks.GetPylint( input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api), - pylintrc='../pylintrc')) + pylintrc='../pylintrc', version='2.7')) return results diff --git a/catapult_build/__init__.py b/catapult_build/__init__.py index 5ec82ffc84c..1e64bc9d5df 100644 --- a/catapult_build/__init__.py +++ b/catapult_build/__init__.py @@ -17,13 +17,12 @@ def _UpdateSysPathIfNeeded(): catapult_path, 'third_party')) _AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'py_utils')) _AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'py_vulcanize')) - if sys.version_info.major == 2: - _AddToPathIfNeeded( - os.path.join(catapult_third_party_path, 'beautifulsoup4')) - else: - _AddToPathIfNeeded( - os.path.join(catapult_third_party_path, 'beautifulsoup4-4.9.3', 'py3k')) - _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'html5lib-python')) + _AddToPathIfNeeded( + os.path.join(catapult_third_party_path, 'beautifulsoup4-4.9.3', 'py3k')) + _AddToPathIfNeeded( + os.path.join(catapult_third_party_path, 'html5lib-1.1')) + _AddToPathIfNeeded( + os.path.join(catapult_third_party_path, 'webencodings-0.5.1')) _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'six')) _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'Paste')) _AddToPathIfNeeded(os.path.join(catapult_third_party_path, 'webapp2')) diff --git a/catapult_build/appengine_deploy.py b/catapult_build/appengine_deploy.py index 8c2732eacc8..c8479589df5 100644 --- a/catapult_build/appengine_deploy.py +++ b/catapult_build/appengine_deploy.py @@ -2,6 +2,8 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +from __future__ import absolute_import +from __future__ import print_function import os import subprocess import sys @@ -21,7 +23,7 @@ def Deploy(paths, args, version=None): version = _VersionName() with temp_deployment_dir.TempDeploymentDir( paths, use_symlinks=False) as temp_dir: - print 'Deploying from "%s".' % temp_dir + print('Deploying from "%s".' % temp_dir) # google-cloud-sdk/bin/gcloud is a shell script, which we can't subprocess # on Windows with shell=False. So, execute the Python script directly. @@ -30,9 +32,9 @@ def Deploy(paths, args, version=None): else: script_path = _FindScriptInPath('gcloud') if not script_path: - print 'This script requires the Google Cloud SDK to be in PATH.' 
- print 'Install at https://cloud.google.com/sdk and then run' - print '`gcloud components install app-engine-python`' + print('This script requires the Google Cloud SDK to be in PATH.') + print('Install at https://cloud.google.com/sdk and then run') + print('`gcloud components install app-engine-python`') sys.exit(1) subprocess.check_call([script_path, 'app', 'deploy', '--no-promote', diff --git a/catapult_build/appengine_dev_server.py b/catapult_build/appengine_dev_server.py index ad8582f0fed..ae3852eb3b1 100644 --- a/catapult_build/appengine_dev_server.py +++ b/catapult_build/appengine_dev_server.py @@ -3,6 +3,8 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +from __future__ import absolute_import +from __future__ import print_function import argparse import os import os.path @@ -24,11 +26,11 @@ def DevAppserver(paths, args, reuse_path=None): """ with temp_deployment_dir.TempDeploymentDir( paths, reuse_path=reuse_path) as temp_dir: - print 'Running dev server on "%s".' % temp_dir + print('Running dev server on "%s".' % temp_dir) script_path = _FindScriptInPath('dev_appserver.py') if not script_path: - print 'This script requires the App Engine SDK to be in PATH.' + print('This script requires the App Engine SDK to be in PATH.') sys.exit(1) subprocess.call([sys.executable, script_path] + @@ -48,10 +50,13 @@ def _AddTempDirToYamlPathArgs(temp_dir, args): """Join `temp_dir` to the positional args, preserving the other args.""" parser = argparse.ArgumentParser() parser.add_argument('yaml_path', nargs='*') + parser.add_argument('--run_pinpoint', default=False, action='store_true') options, remaining_args = parser.parse_known_args(args) yaml_path_args = [ os.path.join(temp_dir, yaml_path) for yaml_path in options.yaml_path ] if not yaml_path_args: + if options.run_pinpoint: + temp_dir += '/pinpoint.yaml' yaml_path_args = [temp_dir] return yaml_path_args + remaining_args diff --git a/catapult_build/bin/run_py_tests b/catapult_build/bin/run_py_tests index 97ccbdf2a12..7b388f667fe 100755 --- a/catapult_build/bin/run_py_tests +++ b/catapult_build/bin/run_py_tests @@ -1,5 +1,5 @@ -#!/usr/bin/env python -# Copyright (c) 2015 The Chromium Authors. All rights reserved. +#!/usr/bin/env vpython3 +# Copyright (c) 2023 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -23,7 +23,7 @@ if __name__ == '__main__': # For some reason the value of PYTHONPATH on try bot will cause import error # when the test is invoked by vpython3. Removing the value as a workaround. 
- if sys.version_info.major == 3 and 'PYTHONPATH' in os.environ: + if 'PYTHONPATH' in os.environ: os.environ.pop('PYTHONPATH') from catapult_build import run_with_typ diff --git a/catapult_build/build_steps.py b/catapult_build/build_steps.py index 84df5ac1e18..66de9a52be3 100644 --- a/catapult_build/build_steps.py +++ b/catapult_build/build_steps.py @@ -50,6 +50,14 @@ }, ] +_PERF_ISSUE_SERVICE_TESTS = [ + { + 'name': 'Perf Issue Service Python Tests', + 'path': 'perf_issue_service/tests/bin/run_py_tests', + 'disabled': ['android', 'win', 'mac'], + } +] + _CATAPULT_TESTS = [ { 'name': 'Build Python Tests', @@ -71,7 +79,7 @@ }, { 'name': 'Devil Python Tests', - 'path': 'devil/bin/run_py_tests', + 'path': 'devil/bin/run_py3_tests', 'disabled': ['mac', 'win'], }, { @@ -95,6 +103,7 @@ 'additional_args': ['--browser=reference',], 'uses_sandbox_env': True, 'disabled': ['android'], + 'python_versions': [3], }, { 'name': 'Telemetry Tests with Stable Browser (Desktop)', @@ -106,6 +115,7 @@ ], 'uses_sandbox_env': True, 'disabled': ['android'], + 'python_versions': [3], }, { 'name': 'Telemetry Tests with Stable Browser (Android)', @@ -117,7 +127,8 @@ '-v', ], 'uses_sandbox_env': True, - 'disabled': ['win', 'mac', 'linux'] + 'disabled': ['win', 'mac', 'linux'], + 'python_versions': [3], }, { 'name': 'Telemetry Integration Tests with Stable Browser', @@ -129,6 +140,7 @@ ], 'uses_sandbox_env': True, 'disabled': ['android', 'linux'], # TODO(nedn): enable this on linux + 'python_versions': [3], }, { 'name': 'Tracing Dev Server Tests', @@ -203,6 +215,7 @@ def main(args=None): '--app-engine-sdk-pythonpath', help='PYTHONPATH to include app engine SDK path') parser.add_argument('--platform', help='Platform name (linux, mac, or win)') + parser.add_argument('--platform_arch', help='Platform arch (intel or arm)') parser.add_argument('--output-json', help='Output for buildbot status page') parser.add_argument( '--run_android_tests', default=True, help='Run Android tests') @@ -212,29 +225,32 @@ def main(args=None): help='Run only the Dashboard and Pinpoint tests', action='store_true') parser.add_argument( - '--use_python3', + '--perf_issue_service_only', default=False, - help='Run Catapult Tests using vpython3', + help='Run only the Perf Issue Service tests', action='store_true') args = parser.parse_args(args) - dashboard_protos_path = os.path.join(args.api_path_checkout, 'dashboard', - 'dashboard', 'proto') + dashboard_protos_folder = os.path.join(args.api_path_checkout, 'dashboard', + 'dashboard', 'protobuf') dashboard_proto_files = [ - os.path.join(dashboard_protos_path, p) + os.path.join(dashboard_protos_folder, p) for p in ['sheriff.proto', 'sheriff_config.proto'] ] + dashboard_protos_path = os.path.join(args.api_path_checkout, 'dashboard') + sheriff_proto_output_path = os.path.join(args.api_path_checkout, 'dashboard', 'dashboard', 'sheriff_config') dashboard_proto_output_path = os.path.join(args.api_path_checkout, - 'dashboard', 'dashboard') + 'dashboard') tracing_protos_path = os.path.join(args.api_path_checkout, 'tracing', 'tracing', 'proto') tracing_proto_output_path = tracing_protos_path tracing_proto_files = [os.path.join(tracing_protos_path, 'histogram.proto')] + protoc_path = 'protoc' steps = [ { @@ -243,7 +259,7 @@ def main(args=None): 'name': 'Remove Stale files', 'cmd': [ - 'python', + 'python3', os.path.join(args.api_path_checkout, 'catapult_build', 'remove_stale_files.py'), args.api_path_checkout, @@ -257,7 +273,7 @@ def main(args=None): 'name': 'Generate Sheriff Config protocol buffers', 'cmd': [ 
- 'protoc', + protoc_path, '--proto_path', dashboard_protos_path, '--python_out', @@ -268,7 +284,7 @@ def main(args=None): 'name': 'Generate Dashboard protocol buffers', 'cmd': [ - 'protoc', + protoc_path, '--proto_path', dashboard_protos_path, '--python_out', @@ -279,7 +295,7 @@ def main(args=None): 'name': 'Generate Tracing protocol buffers', 'cmd': [ - 'protoc', + protoc_path, '--proto_path', tracing_protos_path, '--python_out', @@ -296,7 +312,7 @@ def main(args=None): 'name': 'Android: Recover Devices', 'cmd': [ - 'python', + 'vpython3', os.path.join(args.api_path_checkout, 'devil', 'devil', 'android', 'tools', 'device_recovery.py') ], @@ -305,7 +321,7 @@ def main(args=None): 'name': 'Android: Provision Devices', 'cmd': [ - 'python', + 'vpython3', os.path.join(args.api_path_checkout, 'devil', 'devil', 'android', 'tools', 'provision_devices.py') ], @@ -314,7 +330,7 @@ def main(args=None): 'name': 'Android: Device Status', 'cmd': [ - 'python', + 'vpython3', os.path.join(args.api_path_checkout, 'devil', 'devil', 'android', 'tools', 'device_status.py') ], @@ -324,8 +340,11 @@ def main(args=None): tests = None if args.dashboard_only: tests = _DASHBOARD_TESTS + elif args.perf_issue_service_only: + tests = _PERF_ISSUE_SERVICE_TESTS else: tests = _CATAPULT_TESTS + for test in tests: if args.platform == 'android' and not args.run_android_tests: # Remove all the steps for the Android configuration if we're asked to not @@ -336,22 +355,11 @@ def main(args=None): if args.platform in test.get('disabled', []): continue - # The test "Devil Python Tests" has two executables, run_py_tests and - # run_py3_tests. Those scripts define the vpython interpreter on shebang, - # and will quit when running on unexpected version. This script assumes one - # path for each test and thus we will conditionally replace the script name - # until python 2 is fully dropped. - # here, test_path = test['path'] - if args.use_python3 and test['name'] == 'Devil Python Tests': - test_path = 'devil/bin/run_py3_tests' step = {'name': test['name'], 'env': {}} - if args.use_python3: - vpython_executable = "vpython3" - else: - vpython_executable = "vpython" + vpython_executable = "vpython3" if sys.platform == 'win32': vpython_executable += '.bat' @@ -371,14 +379,9 @@ def main(args=None): step['env']['CHROME_DEVEL_SANDBOX'] = '/opt/chromium/chrome_sandbox' if test.get('outputs_presentation_json'): step['outputs_presentation_json'] = True - # TODO(crbug/1221663): - # Before python 3 conversion is finished, the try jobs with use_python3 are - # experimental. We want to see all possible failure and thus we don't want - # to try job to quit before all tests are finished. - # This condition will be removed when the python 3 conversion is done. 
- if args.use_python3: - step['always_run'] = True + step['always_run'] = True steps.append(step) + with open(args.output_json, 'w') as outfile: json.dump(steps, outfile) diff --git a/catapult_build/dev_server.py b/catapult_build/dev_server.py index a8b25c4858e..e3fef45dcd5 100644 --- a/catapult_build/dev_server.py +++ b/catapult_build/dev_server.py @@ -137,6 +137,7 @@ def get(self, *args, **kwargs): # pylint: disable=unused-argument app.cache_control(no_cache=True) return app self.abort(404) + return None @staticmethod def GetServingPathForAbsFilename(source_paths, filename): @@ -162,7 +163,7 @@ def get(self, *args, **kwargs): # pylint: disable=unused-argument os.path.join(top_path, kwargs.pop('rest_of_path'))) if not joined_path.startswith(top_path): self.response.set_status(403) - return + return None app = FileAppWithGZipHandling(joined_path) app.cache_control(no_cache=True) return app diff --git a/catapult_build/perfbot_stats/chrome_perf_stats.py b/catapult_build/perfbot_stats/chrome_perf_stats.py index c015e305a90..0e10645ce04 100755 --- a/catapult_build/perfbot_stats/chrome_perf_stats.py +++ b/catapult_build/perfbot_stats/chrome_perf_stats.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2.7 +#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -23,6 +23,7 @@ import six.moves.urllib.request # pylint: disable=import-error import six.moves.urllib.parse # pylint: disable=import-error import six.moves.urllib.error # pylint: disable=import-error +from six.moves import range BUILDER_LIST_URL = ('https://chrome-infra-stats.appspot.com/' '_ah/api/stats/v1/masters/chromium.perf') @@ -134,7 +135,8 @@ def UploadToPerfDashboard(success_rates): } } url = 'https://chromeperf.appspot.com/add_point' - data = six.moves.urllib.parse.urlencode({'data': json.dumps(dashboard_data)}) + data = six.moves.urllib.parse.urlencode( + {'data': json.dumps(dashboard_data)}) six.moves.urllib.request.urlopen(url, data).read() @@ -146,7 +148,8 @@ def CalculateSuccessRates(year, month, days, builders): date_dict_str = '%d%02d%02d' % (year, month, day) for builder in builders: url = BUILDER_STATS_URL % ( - six.moves.urllib.parse.quote(builder), six.moves.urllib.parse.quote(date_str)) + six.moves.urllib.parse.quote(builder), + six.moves.urllib.parse.quote(date_str)) response = six.moves.urllib.request.urlopen(url) results = json.load(response) _UpdateSuccessRatesWithResult( diff --git a/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py b/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py index bb72bf19039..07372dfd79e 100644 --- a/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py +++ b/catapult_build/perfbot_stats/chrome_perf_stats_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2.7 +#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/catapult_build/perfbot_stats/chrome_perf_step_timings.py b/catapult_build/perfbot_stats/chrome_perf_step_timings.py index 30558abd1f6..05925bcf60b 100755 --- a/catapult_build/perfbot_stats/chrome_perf_step_timings.py +++ b/catapult_build/perfbot_stats/chrome_perf_step_timings.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2.7 +#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -13,12 +13,14 @@ base=https://chrome-infra-stats.appspot.com/_ah/api#p/ """ +from __future__ import print_function import csv import datetime import json import sys -import urllib -import urllib2 +import six.moves.urllib.request +import six.moves.urllib.parse +import six.moves.urllib.error BUILDER_STEPS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/' @@ -115,7 +117,7 @@ def main(): if len(sys.argv) != 2: - print USAGE + print(USAGE) sys.exit(0) outfilename = sys.argv[1] @@ -129,18 +131,20 @@ def main(): for builder in KNOWN_TESTERS_LIST: step_timings = [] - url = BUILDER_STEPS_URL % urllib.quote(builder) - response = urllib2.urlopen(url) + url = BUILDER_STEPS_URL % six.moves.urllib.parse.quote(builder) + response = six.moves.urllib.request.urlopen(url) results = json.load(response) steps = results['steps'] steps.sort() # to group tests and their references together. for step in steps: if step in IGNORED_STEPS: continue - url = STEP_ACTIVE_URL % (urllib.quote(builder), urllib.quote(step)) - response = urllib2.urlopen(url) + url = STEP_ACTIVE_URL % ( + six.moves.urllib.parse.quote(builder), + six.moves.urllib.parse.quote(step)) + response = six.moves.urllib.request.urlopen(url) results = json.load(response) - if ('step_records' not in results.keys() or + if ('step_records' not in list(results.keys()) or len(results['step_records']) == 0): continue first_record = results['step_records'][0] @@ -149,8 +153,10 @@ def main(): # ignore steps that did not run for more than 2 days if last_step_time < threshold_time: continue - url = STEP_STATS_URL % (urllib.quote(builder), urllib.quote(step)) - response = urllib2.urlopen(url) + url = STEP_STATS_URL % ( + six.moves.urllib.parse.quote(builder), + six.moves.urllib.parse.quote(step)) + response = six.moves.urllib.request.urlopen(url) results = json.load(response) step_timings.append( [builder, step, results['count'], results['stddev'], diff --git a/catapult_build/repo_checks.py b/catapult_build/repo_checks.py index 8fc765a88ee..52309ce50fc 100644 --- a/catapult_build/repo_checks.py +++ b/catapult_build/repo_checks.py @@ -13,5 +13,4 @@ def RunChecks(input_api, output_api): return [output_api.PresubmitError( 'Files with ".orig" suffix must not be checked into the ' 'repository:\n ' + '\n '.join(orig_files))] - else: - return [] + return [] diff --git a/catapult_build/run_dev_server_tests.py b/catapult_build/run_dev_server_tests.py index edc24d9661f..9d100bc65d0 100644 --- a/catapult_build/run_dev_server_tests.py +++ b/catapult_build/run_dev_server_tests.py @@ -133,24 +133,20 @@ def GetChromeInfo(args): raise ChromeNotFound('Could not find chrome locally. 
You can supply it ' 'manually using --chrome_path') return ChromeInfo(path=chrome_path, version=None) - else: - channel = args.channel - if sys.version_info.major == 3: - target = 'linux' - else: - target = 'linux2' - if sys.platform == target and channel == 'canary': - channel = 'dev' - assert channel in ['stable', 'beta', 'dev', 'canary'] - - binary = 'chrome' - print('Fetching the', channel, binary, 'binary via the binary_manager.') - chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG]) - os_name, arch = dependency_util.GetOSAndArchForCurrentDesktopPlatform() - chrome_path, version = chrome_manager.FetchPathWithVersion( - '%s_%s' % (binary, channel), os_name, arch) - print('Finished fetching the', binary, 'binary to', chrome_path) - return ChromeInfo(path=chrome_path, version=version) + channel = args.channel + target = 'linux' + if sys.platform == target and channel == 'canary': + channel = 'dev' + assert channel in ['stable', 'beta', 'dev', 'canary'] + + binary = 'chrome' + print('Fetching the', channel, binary, 'binary via the binary_manager.') + chrome_manager = binary_manager.BinaryManager([CHROME_BINARIES_CONFIG]) + os_name, arch = dependency_util.GetOSAndArchForCurrentDesktopPlatform() + chrome_path, version = chrome_manager.FetchPathWithVersion( + '%s_%s' % (binary, channel), os_name, arch) + print('Finished fetching the', binary, 'binary to', chrome_path) + return ChromeInfo(path=chrome_path, version=version) def KillProcess(process): @@ -210,6 +206,7 @@ def RunTests(args, chrome_path): '--enable-logging', '--v=1', '--enable-features=ForceWebRequestProxyForTest', '--force-device-scale-factor=1', + '--use-mock-keychain', ] if args.extra_chrome_args: chrome_command.extend(args.extra_chrome_args.strip('"').split(' ')) @@ -246,12 +243,11 @@ def KillServer(): if timed_out: print('Tests did not finish before', args.timeout_sec, 'seconds') return _TIMEOUT_RETURNCODE + if server_process.returncode == 0: + print("Tests passed in %.2f seconds." % (time.time() - test_start_time)) else: - if server_process.returncode == 0: - print("Tests passed in %.2f seconds." % (time.time() - test_start_time)) - else: - logging.error('Tests failed!') - return server_process.returncode + logging.error('Tests failed!') + return server_process.returncode finally: if timer: @@ -331,8 +327,7 @@ def Main(argv): if return_code == _TIMEOUT_RETURNCODE: attempts_left -= 1 continue - else: - break + break else: logging.error('Tests timed out every time. Retried %d times.', args.timeout_retries) diff --git a/catapult_build/run_with_typ.py b/catapult_build/run_with_typ.py index 543e4a2008a..b38c204241f 100644 --- a/catapult_build/run_with_typ.py +++ b/catapult_build/run_with_typ.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
@@ -24,7 +24,7 @@ def Run(top_level_dir, path=None, suffixes=None, **kwargs): typ_path = os.path.abspath(os.path.join( os.path.dirname(__file__), os.path.pardir, 'third_party', 'typ')) _AddToPathIfNeeded(typ_path) - import typ + import typ # pylint: disable=import-outside-toplevel return typ.main( top_level_dir=top_level_dir, path=(path or []), diff --git a/catapult_build/test_runner.py b/catapult_build/test_runner.py index 6a0299b5b6a..3b2bd267d0d 100644 --- a/catapult_build/test_runner.py +++ b/catapult_build/test_runner.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/common/bin/run_tests b/common/bin/run_tests index aab34cac89a..09f6d799909 100755 --- a/common/bin/run_tests +++ b/common/bin/run_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/common/bin/update_chrome_reference_binaries.py b/common/bin/update_chrome_reference_binaries.py index 2131aa66984..bafaa4f14df 100755 --- a/common/bin/update_chrome_reference_binaries.py +++ b/common/bin/update_chrome_reference_binaries.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be @@ -45,7 +45,8 @@ # Remove a platform name from this list to disable updating it. # Add one to enable updating it. (Must also update _PLATFORM_MAP.) -_PLATFORMS_TO_UPDATE = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64', +_PLATFORMS_TO_UPDATE = ['mac_arm64', 'mac_x86_64', 'win_x86', + 'win_AMD64', 'linux_x86_64', 'android_k_armeabi-v7a', 'android_l_arm64-v8a', 'android_l_armeabi-v7a', 'android_n_armeabi-v7a', 'android_n_arm64-v8a', 'android_n_bundle_armeabi-v7a', @@ -53,7 +54,8 @@ # Add platforms here if you also want to update chromium binary for it. # Must add chromium_info for it in _PLATFORM_MAP. -_CHROMIUM_PLATFORMS = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64'] +_CHROMIUM_PLATFORMS = ['mac_arm64', 'mac_x86_64', 'win_x86', 'win_AMD64', + 'linux_x86_64'] # Remove a channel name from this list to disable updating it. # Add one to enable updating it. @@ -62,7 +64,8 @@ # Omaha is Chrome's autoupdate server. It reports the current versions used # by each platform on each channel. 
-_OMAHA_PLATFORMS = { 'stable': ['mac', 'linux', 'win', 'android'], +_OMAHA_PLATFORMS = { 'stable': ['mac_arm64', 'mac', 'linux', 'win', + 'win64', 'android'], 'dev': ['linux'], 'canary': ['mac', 'win']} @@ -85,6 +88,15 @@ build_dir='Mac', zip_name='chrome-mac.zip'), zip_name='chrome-mac.zip'), + 'mac_arm64': UpdateInfo( + omaha='mac_arm64', + gs_folder='desktop-*', + gs_build='mac-arm64', + chromium_info=ChromiumInfo( + build_dir='Mac_Arm', + zip_name='chrome-mac.zip', + ), + zip_name='chrome-mac.zip'), 'win_x86': UpdateInfo( omaha='win', gs_folder='desktop-*', @@ -151,10 +163,8 @@ gs_build='arm_64', chromium_info=None, zip_name='Monochrome.apks') - } - VersionInfo = collections.namedtuple('VersionInfo', 'version, branch_base_position') @@ -173,7 +183,7 @@ def _ChannelVersionsMap(channel): def _OmahaReportVersionInfo(channel): url ='https://omahaproxy.appspot.com/all?channel=%s' % channel lines = six.moves.urllib.request.urlopen(url).readlines() - return [l.split(',') for l in lines] + return [six.ensure_str(l).split(',') for l in lines] def _OmahaVersionsMap(rows, channel): @@ -229,8 +239,8 @@ def _FindClosestChromiumSnapshot(base_position, build_dir): # positions between 123446 an 123466. We do this by getting all snapshots # with prefix 12344*, 12345*, and 12346*. This may get a few more snapshots # that we intended, but that's fine since we take the min distance anyways. - min_position_prefix = min_position / 10; - max_position_prefix = max_position / 10; + min_position_prefix = min_position // 10; + max_position_prefix = max_position // 10; available_positions = [] for position_prefix in range(min_position_prefix, max_position_prefix + 1): @@ -318,7 +328,7 @@ def _ModifyBuildIfNeeded(binary, location, platform): if binary != 'chrome': return - if platform == 'mac_x86_64': + if platform in ['mac_x86_64', 'mac_arm64']: _RemoveKeystoneFromBuild(location) return diff --git a/common/py_trace_event/bin/run_tests b/common/py_trace_event/bin/run_tests index b9e1cbe6f02..863803eecc3 100755 --- a/common/py_trace_event/bin/run_tests +++ b/common/py_trace_event/bin/run_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/common/py_trace_event/py_trace_event/setup.py b/common/py_trace_event/py_trace_event/setup.py index 06caf09c880..e57ea2ea2b2 100644 --- a/common/py_trace_event/py_trace_event/setup.py +++ b/common/py_trace_event/py_trace_event/setup.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
diff --git a/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py b/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py index e3494ff6d14..96e424bc678 100644 --- a/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py +++ b/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py @@ -32,7 +32,7 @@ def get_wrapper(func): category = "python" - arg_spec = inspect.getargspec(func) + arg_spec = inspect.getfullargspec(func) is_method = arg_spec.args and arg_spec.args[0] == "self" def arg_spec_tuple(name): diff --git a/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py b/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py index 08986f90caf..1ce466cf0a6 100644 --- a/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py +++ b/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py @@ -12,10 +12,11 @@ class ProcessSubclass(_RealProcess): def __init__(self, shim, *args, **kwards): + multiprocessing.get_context("forkserver") _RealProcess.__init__(self, *args, **kwards) self._shim = shim - def run(self,*args,**kwargs): + def run(self, *args, **kwargs): from . import log log._disallow_tracing_control() try: diff --git a/common/py_trace_event/py_trace_event/trace_event_unittest.py b/common/py_trace_event/py_trace_event/trace_event_unittest.py index ab6da5cd997..2b59cca157e 100644 --- a/common/py_trace_event/py_trace_event/trace_event_unittest.py +++ b/common/py_trace_event/py_trace_event/trace_event_unittest.py @@ -20,6 +20,12 @@ from py_utils import tempfile_ext +# Moving out for pickle serialization. +def child(resp): + # test tracing is not controllable in the child + resp.put(trace_event.is_tracing_controllable()) + + class TraceEventTests(unittest.TestCase): @contextlib.contextmanager @@ -397,10 +403,6 @@ def child_function(): @unittest.skipIf(sys.platform == 'win32', 'crbug.com/945819') def testTracingControlDisabledInChildButNotInParent(self): - def child(resp): - # test tracing is not controllable in the child - resp.put(trace_event.is_tracing_controllable()) - with self._test_trace(): q = multiprocessing.Queue() p = multiprocessing.Process(target=child, args=[q]) diff --git a/common/py_trace_event/third_party/protobuf/README.chromium b/common/py_trace_event/third_party/protobuf/README.chromium index adf4cb8a3a8..bf07354c411 100644 --- a/common/py_trace_event/third_party/protobuf/README.chromium +++ b/common/py_trace_event/third_party/protobuf/README.chromium @@ -2,6 +2,7 @@ Name: Protobuf URL: https://developers.google.com/protocol-buffers/ Version: 3.0.0 License: BSD +Shipped: yes Description: Protocol buffers are Google's language-neutral, platform-neutral, diff --git a/common/py_utils/PRESUBMIT.py b/common/py_utils/PRESUBMIT.py index c1d92fe0031..7b8510e31b1 100644 --- a/common/py_utils/PRESUBMIT.py +++ b/common/py_utils/PRESUBMIT.py @@ -1,6 +1,10 @@ # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
+# pylint: disable=invalid-name + + +USE_PYTHON3 = True def CheckChangeOnUpload(input_api, output_api): @@ -15,7 +19,7 @@ def _CommonChecks(input_api, output_api): results = [] results += input_api.RunTests(input_api.canned_checks.GetPylint( input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api), - pylintrc='../../pylintrc')) + pylintrc='../../pylintrc', version='2.7')) return results diff --git a/common/py_utils/bin/run_tests b/common/py_utils/bin/run_tests index 66a4b5967ac..707e44a3826 100755 --- a/common/py_utils/bin/run_tests +++ b/common/py_utils/bin/run_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright (c) 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/common/py_utils/py_utils/__init__.py b/common/py_utils/py_utils/__init__.py index 303641acb80..b4c22a21627 100644 --- a/common/py_utils/py_utils/__init__.py +++ b/common/py_utils/py_utils/__init__.py @@ -34,12 +34,13 @@ def IsRunningOnCrosDevice(): def GetHostOsName(): if IsRunningOnCrosDevice(): return 'chromeos' - elif sys.platform.startswith('linux'): + if sys.platform.startswith('linux'): return 'linux' - elif sys.platform == 'darwin': + if sys.platform == 'darwin': return 'mac' - elif sys.platform == 'win32': + if sys.platform == 'win32': return 'win' + return None def GetHostArchName(): @@ -56,13 +57,12 @@ def IsExecutable(path): if os.path.isfile(path): if hasattr(os, 'name') and os.name == 'nt': return path.split('.')[-1].upper() in _ExecutableExtensions() - else: - return os.access(path, os.X_OK) - else: - return False + return os.access(path, os.X_OK) + return False def _AddDirToPythonPath(*path_parts): + # pylint: disable=no-value-for-parameter path = os.path.abspath(os.path.join(*path_parts)) if os.path.isdir(path) and path not in sys.path: # Some callsite that use telemetry assumes that sys.path[0] is the directory @@ -96,10 +96,7 @@ def Timeout(default_timeout): def TimeoutDeco(func, default_timeout): @functools.wraps(func) def RunWithTimeout(*args, **kwargs): - if 'timeout' in kwargs: - timeout = kwargs['timeout'] - else: - timeout = default_timeout + timeout = kwargs.get('timeout', default_timeout) try: return timeout_retry.Run(func, timeout, 0, args=args) except reraiser_thread.TimeoutError: @@ -156,4 +153,3 @@ class TimeoutException(Exception): It is possible that waiting for a longer period of time would result in a successful operation. """ - pass diff --git a/common/py_utils/py_utils/binary_manager.py b/common/py_utils/py_utils/binary_manager.py index 4ca601abb4a..51027d82d45 100644 --- a/common/py_utils/py_utils/binary_manager.py +++ b/common/py_utils/py_utils/binary_manager.py @@ -8,7 +8,7 @@ import dependency_manager -class BinaryManager(object): +class BinaryManager(): """ This class is effectively a subclass of dependency_manager, but uses a different number of arguments for FetchPath and LocalPath. """ @@ -59,4 +59,3 @@ def _WrapDependencyManagerFunction( 'Cannot find path for %s on platform %s. 
Falling back to %s.', binary_name, versioned_platform, platform) return function(binary_name, platform) - diff --git a/common/py_utils/py_utils/binary_manager_unittest.py b/common/py_utils/py_utils/binary_manager_unittest.py index ae2222a5b53..97cbe13be48 100644 --- a/common/py_utils/py_utils/binary_manager_unittest.py +++ b/common/py_utils/py_utils/binary_manager_unittest.py @@ -5,7 +5,6 @@ from __future__ import absolute_import import json import os -import six from pyfakefs import fake_filesystem_unittest from dependency_manager import exceptions @@ -15,15 +14,8 @@ class BinaryManagerTest(fake_filesystem_unittest.TestCase): # TODO(aiolos): disable cloud storage use during this test. - def assertCountEqualPy23(self, expected, actual): - if six.PY2: - self.assertItemsEqual(expected, actual) - else: - self.assertCountEqual(expected, actual) - def setUp(self): self.setUpPyfakefs() - # pylint: disable=bad-continuation self.expected_dependencies = { 'dep_1': { 'cloud_storage_base_folder': 'dependencies/fake_config', @@ -146,7 +138,6 @@ def setUp(self): } } } - # pylint: enable=bad-continuation fake_config = { 'config_type': 'BaseConfig', 'dependencies': self.expected_dependencies @@ -181,7 +172,7 @@ def testInitializationWithConfig(self): with self.assertRaises(ValueError): manager = binary_manager.BinaryManager(self.base_config) manager = binary_manager.BinaryManager([self.base_config]) - self.assertCountEqualPy23(self.expected_dependencies, + self.assertCountEqual(self.expected_dependencies, manager._dependency_manager._lookup_dict) def testSuccessfulFetchPathNoOsVersion(self): @@ -219,4 +210,3 @@ def testSuccessfulLocalPathOsVersion(self): manager = binary_manager.BinaryManager([self.base_config]) found_path = manager.LocalPath('dep_2', 'android', 'x86', 'l') self.assertEqual(self.expected_dep2_android_file, found_path) - diff --git a/common/py_utils/py_utils/camel_case.py b/common/py_utils/py_utils/camel_case.py index dbebb227384..dbe9943e804 100644 --- a/common/py_utils/py_utils/camel_case.py +++ b/common/py_utils/py_utils/camel_case.py @@ -18,17 +18,16 @@ def ToUnderscore(obj): if isinstance(obj, six.string_types): return re.sub('(?!^)([A-Z]+)', r'_\1', obj).lower() - elif isinstance(obj, list): + if isinstance(obj, list): return [ToUnderscore(item) for item in obj] - elif isinstance(obj, dict): + if isinstance(obj, dict): output = {} for k, v in six.iteritems(obj): - if isinstance(v, list) or isinstance(v, dict): + if isinstance(v, (list, dict)): output[ToUnderscore(k)] = ToUnderscore(v) else: output[ToUnderscore(k)] = v return output - else: - return obj + return obj diff --git a/common/py_utils/py_utils/chrome_binaries.json b/common/py_utils/py_utils/chrome_binaries.json index 03f0afd3476..04173ec6dae 100644 --- a/common/py_utils/py_utils/chrome_binaries.json +++ b/common/py_utils/py_utils/chrome_binaries.json @@ -88,6 +88,12 @@ "path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome", "version_in_cs": "83.0.4103.97" }, + "mac_arm64": { + "cloud_storage_hash": "8e5715ab04cdf366a20038b6d6aac2f41e12bbf0", + "download_path": "bin/reference_build/chrome-mac-arm64.zip", + "path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome", + "version_in_cs": "100.0.4896.75" + }, "win_AMD64": { "cloud_storage_hash": "e6515496ebab0d02a5294a008fe8bbf9dd3dbc0c", "download_path": "bin\\reference_build\\chrome-win64-clang.zip", @@ -154,6 +160,12 @@ "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium", "version_in_cs": "83.0.4103.97" 
}, + "mac_arm64": { + "cloud_storage_hash": "5fd942947943278bcf91b6f7ef85485ebfef3092", + "download_path": "bin/reference_build/chrome-mac-arm64.zip", + "path_within_archive": "chrome-mac/Chromium.app/Contents/MacOS/Chromium", + "version_in_cs": "100.0.4896.75" + }, "win_AMD64": { "cloud_storage_hash": "5e503f1bfeae37061ddc80ae1660a1c41594b188", "download_path": "bin\\reference_build\\chrome-win.zip", diff --git a/common/py_utils/py_utils/class_util_unittest.py b/common/py_utils/py_utils/class_util_unittest.py index 0e090e2af04..d04a12d9a1b 100644 --- a/common/py_utils/py_utils/class_util_unittest.py +++ b/common/py_utils/py_utils/class_util_unittest.py @@ -11,7 +11,7 @@ class ClassUtilTest(unittest.TestCase): def testClassOverridden(self): - class Parent(object): + class Parent(): def MethodShouldBeOverridden(self): pass @@ -23,7 +23,7 @@ def MethodShouldBeOverridden(self): Parent, Child, 'MethodShouldBeOverridden')) def testGrandchildOverridden(self): - class Parent(object): + class Parent(): def MethodShouldBeOverridden(self): pass @@ -38,7 +38,7 @@ def MethodShouldBeOverridden(self): Parent, Grandchild, 'MethodShouldBeOverridden')) def testClassNotOverridden(self): - class Parent(object): + class Parent(): def MethodShouldBeOverridden(self): pass @@ -50,7 +50,7 @@ def SomeOtherMethod(self): Parent, Child, 'MethodShouldBeOverridden')) def testGrandchildNotOverridden(self): - class Parent(object): + class Parent(): def MethodShouldBeOverridden(self): pass @@ -66,7 +66,7 @@ def SomeOtherMethod(self): Parent, Grandchild, 'MethodShouldBeOverridden')) def testClassNotPresentInParent(self): - class Parent(object): + class Parent(): def MethodShouldBeOverridden(self): pass @@ -79,7 +79,7 @@ def MethodShouldBeOverridden(self): Parent, Child, 'WrongMethod') def testInvalidClass(self): - class Foo(object): + class Foo(): def Bar(self): pass @@ -90,18 +90,18 @@ def Bar(self): AssertionError, class_util.IsMethodOverridden, Foo, 'invalid', 'Bar') def testMultipleInheritance(self): - class Aaa(object): + class Aaa(): def One(self): pass - class Bbb(object): + class Bbb(): def Two(self): pass class Ccc(Aaa, Bbb): pass - class Ddd(object): + class Ddd(): def Three(self): pass @@ -113,7 +113,7 @@ class Fff(Ccc, Eee): def One(self): pass - class Ggg(object): + class Ggg(): def Four(self): pass @@ -135,5 +135,3 @@ class Jjj(Iii): self.assertTrue(class_util.IsMethodOverridden(Bbb, Hhh, 'Two')) self.assertTrue(class_util.IsMethodOverridden(Bbb, Jjj, 'Two')) self.assertFalse(class_util.IsMethodOverridden(Eee, Fff, 'Three')) - - diff --git a/common/py_utils/py_utils/cloud_storage.py b/common/py_utils/py_utils/cloud_storage.py index 0dcffdcdea3..3a36a573d8d 100644 --- a/common/py_utils/py_utils/cloud_storage.py +++ b/common/py_utils/py_utils/cloud_storage.py @@ -67,19 +67,24 @@ class CloudStorageError(Exception): @staticmethod def _GetConfigInstructions(): - command = _GSUTIL_PATH + boto_command = ('export BOTO_CONFIG=$(gcloud info --format ' + '"value(config.paths.global_config_dir)")/legacy_credentials/' + '$(gcloud config list --format="value(core.account)")/.boto') + retval = ('To configure your credentials, run \n%s\n' + 'Next run "gcloud auth login" and follow its instructions.\n' + 'To make this change persistent, add the export BOTO_CONFIG\n' + 'command to your ~/.bashrc file.' % boto_command) if py_utils.IsRunningOnCrosDevice(): - command = 'HOME=%s %s' % (_CROS_GSUTIL_HOME_WAR, _GSUTIL_PATH) - return ('To configure your credentials:\n' - ' 1. Run "%s config" and follow its instructions.\n' - ' 2. 
If you have a @google.com account, use that account.\n' - ' 3. For the project-id, just enter 0.' % command) + retval += ('If running on Chrome OS, gcloud auth login may require\n' + 'setting the home directory to HOME=%s' + % _CROS_GSUTIL_HOME_WAR) + return retval -class PermissionError(CloudStorageError): +class CloudStoragePermissionError(CloudStorageError): def __init__(self): - super(PermissionError, self).__init__( + super().__init__( 'Attempted to access a file from Cloud Storage but you don\'t ' 'have permission. ' + self._GetConfigInstructions()) @@ -87,7 +92,7 @@ def __init__(self): class CredentialsError(CloudStorageError): def __init__(self): - super(CredentialsError, self).__init__( + super().__init__( 'Attempted to access a file from Cloud Storage but you have no ' 'configured credentials. ' + self._GetConfigInstructions()) @@ -139,13 +144,9 @@ def _RunCommand(args): elif _IsRunningOnSwarming(): gsutil_env = os.environ.copy() - if os.name == 'nt': - # If Windows, prepend python. Python scripts aren't directly executable. - args = [sys.executable, _GSUTIL_PATH] + args - else: - # Don't do it on POSIX, in case someone is using a shell script to redirect. - args = [_GSUTIL_PATH] + args - _EnsureExecutable(_GSUTIL_PATH) + # Always prepend executable to take advantage of vpython following advice of: + # https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md + args = [sys.executable, _GSUTIL_PATH] + args if args[0] not in ('help', 'hash', 'version') and not IsNetworkIOEnabled(): raise CloudStorageIODisabled( @@ -171,7 +172,7 @@ def GetErrorObjectForCloudStorageStderr(stderr): if ('status=403' in stderr or 'status 403' in stderr or '403 Forbidden' in stderr or re.match('.*403.*does not have .* access to .*', stderr)): - return PermissionError() + return CloudStoragePermissionError() if (stderr.startswith('InvalidUriError') or 'No such object' in stderr or 'No URLs matched' in stderr or 'One or more URLs matched no' in stderr): return NotFoundError(stderr) @@ -222,6 +223,39 @@ def List(bucket, prefix=None): return [url[len(bucket_prefix):] for url in stdout.splitlines()] +def ListFiles(bucket, path='', sort_by='name'): + """Returns files matching the given path in bucket. + + Args: + bucket: Name of cloud storage bucket to look at. + path: Path within the bucket to filter to. Path can include wildcards. + sort_by: 'name' (default), 'time' or 'size'. + + Returns: + A sorted list of files. + """ + bucket_prefix = 'gs://%s' % bucket + full_path = '%s/%s' % (bucket_prefix, path) + stdout = _RunCommand(['ls', '-l', '-d', full_path]) + + # Filter out directories and the summary line. + file_infos = [line.split(None, 2) for line in stdout.splitlines() + if len(line) > 0 and not line.startswith("TOTAL") + and not line.endswith('/')] + + # The first field in the info is size, the second is time, the third is name. + if sort_by == 'size': + file_infos.sort(key=lambda info: int(info[0])) + elif sort_by == 'time': + file_infos.sort(key=lambda info: info[1]) + elif sort_by == 'name': + file_infos.sort(key=lambda info: info[2]) + else: + raise ValueError("Wrong sort_by value: %s" % sort_by) + + return [url[len(bucket_prefix):] for _, _, url in file_infos] + + def ListDirs(bucket, path=''): """Returns only directories matching the given path in bucket. 
@@ -253,6 +287,7 @@ def ListDirs(bucket, path=''): dirs.append(url[len(bucket_prefix):]) return dirs + def Exists(bucket, remote_path): try: _RunCommand(['ls', 'gs://%s/%s' % (bucket, remote_path)]) @@ -433,7 +468,7 @@ def Insert(bucket, remote_path, local_path, publicly_readable=False): return cloud_filepath.view_url -class CloudFilepath(object): +class CloudFilepath(): def __init__(self, bucket, remote_path): self.bucket = bucket self.remote_path = remote_path @@ -441,7 +476,7 @@ def __init__(self, bucket, remote_path): @property def view_url(self): """Get a human viewable url for the cloud file.""" - return 'https://console.developers.google.com/m/cloudstorage/b/%s/o/%s' % ( + return 'https://storage.cloud.google.com/%s/%s' % ( self.bucket, self.remote_path) @property diff --git a/common/py_utils/py_utils/cloud_storage_unittest.py b/common/py_utils/py_utils/cloud_storage_unittest.py index 600c7c26023..7c02debbdb7 100644 --- a/common/py_utils/py_utils/cloud_storage_unittest.py +++ b/common/py_utils/py_utils/cloud_storage_unittest.py @@ -74,7 +74,8 @@ def testRunCommandCredentialsError(self): def testRunCommandPermissionError(self): strs = ['status=403', 'status 403', '403 Forbidden'] - self._AssertRunCommandRaisesError(strs, cloud_storage.PermissionError) + self._AssertRunCommandRaisesError( + strs, cloud_storage.CloudStoragePermissionError) def testRunCommandNotFoundError(self): strs = ['InvalidUriError', 'No such object', 'No URLs matched', @@ -97,8 +98,8 @@ def testInsertCreatesValidCloudUrl(self): local_path = 'test-local-path.html' cloud_url = cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET, remote_path, local_path) - self.assertEqual('https://console.developers.google.com/m/cloudstorage' - '/b/chromium-telemetry/o/test-remote-path.html', + self.assertEqual('https://storage.cloud.google.com' + '/chromium-telemetry/test-remote-path.html', cloud_url) finally: cloud_storage._RunCommand = orig_run_command @@ -111,8 +112,8 @@ def testUploadCreatesValidCloudUrls(self): local_path = 'test-local-path.html' cloud_filepath = cloud_storage.Upload( cloud_storage.PUBLIC_BUCKET, remote_path, local_path) - self.assertEqual('https://console.developers.google.com/m/cloudstorage' - '/b/chromium-telemetry/o/test-remote-path.html', + self.assertEqual('https://storage.cloud.google.com' + '/chromium-telemetry/test-remote-path.html', cloud_filepath.view_url) self.assertEqual('gs://chromium-telemetry/test-remote-path.html', cloud_filepath.fetch_url) @@ -200,6 +201,48 @@ def testListDirs(self, mock_run_command): self.assertEqual(cloud_storage.ListDirs('bucket', 'foo*'), ['/foo1/', '/foo2/']) + @mock.patch('py_utils.cloud_storage._RunCommand') + def testListFilesSortByName(self, mock_run_command): + mock_run_command.return_value = '\n'.join([ + ' 11 2022-01-01T16:05:16Z gs://bucket/foo/c.txt', + ' 5 2022-03-03T16:05:16Z gs://bucket/foo/a.txt', + '', + ' gs://bucket/foo/bar/', + ' 1 2022-02-02T16:05:16Z gs://bucket/foo/bar/b.txt', + 'TOTAL: 3 objects, 17 bytes (17 B)', + ]) + + self.assertEqual(cloud_storage.ListFiles('bucket', 'foo/*', sort_by='name'), + ['/foo/a.txt', '/foo/bar/b.txt', '/foo/c.txt']) + + @mock.patch('py_utils.cloud_storage._RunCommand') + def testListFilesSortByTime(self, mock_run_command): + mock_run_command.return_value = '\n'.join([ + ' 11 2022-01-01T16:05:16Z gs://bucket/foo/c.txt', + ' 5 2022-03-03T16:05:16Z gs://bucket/foo/a.txt', + '', + ' gs://bucket/foo/bar/', + ' 1 2022-02-02T16:05:16Z gs://bucket/foo/bar/b.txt', + 'TOTAL: 3 objects, 17 bytes (17 B)', + ]) + + 
self.assertEqual(cloud_storage.ListFiles('bucket', 'foo/*', sort_by='time'), + ['/foo/c.txt', '/foo/bar/b.txt', '/foo/a.txt']) + + @mock.patch('py_utils.cloud_storage._RunCommand') + def testListFilesSortBySize(self, mock_run_command): + mock_run_command.return_value = '\n'.join([ + ' 11 2022-01-01T16:05:16Z gs://bucket/foo/c.txt', + ' 5 2022-03-03T16:05:16Z gs://bucket/foo/a.txt', + '', + ' gs://bucket/foo/bar/', + ' 1 2022-02-02T16:05:16Z gs://bucket/foo/bar/b.txt', + 'TOTAL: 3 objects, 17 bytes (17 B)', + ]) + + self.assertEqual(cloud_storage.ListFiles('bucket', 'foo/*', sort_by='size'), + ['/foo/bar/b.txt', '/foo/a.txt', '/foo/c.txt']) + @mock.patch('py_utils.cloud_storage.subprocess.Popen') def testSwarmingUsesExistingEnv(self, mock_popen): os.environ['SWARMING_HEADLESS'] = '1' @@ -246,12 +289,12 @@ def CleanTimeStampFile(): class GetIfChangedTests(BaseFakeFsUnitTest): def setUp(self): - super(GetIfChangedTests, self).setUp() + super().setUp() self._orig_read_hash = cloud_storage.ReadHash self._orig_calculate_hash = cloud_storage.CalculateHash def tearDown(self): - super(GetIfChangedTests, self).tearDown() + super().tearDown() cloud_storage.CalculateHash = self._orig_calculate_hash cloud_storage.ReadHash = self._orig_read_hash @@ -430,4 +473,4 @@ def runTest(self): cloud_storage.CredentialsError) self.assertIsInstance(cloud_storage.GetErrorObjectForCloudStorageStderr( '403 Caller does not have storage.objects.list access to bucket ' - 'chrome-telemetry'), cloud_storage.PermissionError) + 'chrome-telemetry'), cloud_storage.CloudStoragePermissionError) diff --git a/common/py_utils/py_utils/contextlib_ext.py b/common/py_utils/py_utils/contextlib_ext.py index 922d27d548b..44ccf9e7ec8 100644 --- a/common/py_utils/py_utils/contextlib_ext.py +++ b/common/py_utils/py_utils/contextlib_ext.py @@ -3,7 +3,7 @@ # found in the LICENSE file. -class _OptionalContextManager(object): +class _OptionalContextManager(): def __init__(self, manager, condition): self._manager = manager @@ -30,4 +30,3 @@ def Optional(manager, condition): A context manager that conditionally executes the given manager. """ return _OptionalContextManager(manager, condition) - diff --git a/common/py_utils/py_utils/contextlib_ext_unittest.py b/common/py_utils/py_utils/contextlib_ext_unittest.py index 6488f3dcba8..be40e7b641f 100644 --- a/common/py_utils/py_utils/contextlib_ext_unittest.py +++ b/common/py_utils/py_utils/contextlib_ext_unittest.py @@ -10,7 +10,7 @@ class OptionalUnittest(unittest.TestCase): - class SampleContextMgr(object): + class SampleContextMgr(): def __init__(self): self.entered = False diff --git a/common/py_utils/py_utils/dependency_util.py b/common/py_utils/py_utils/dependency_util.py index 2a3108ad023..e7b3345f96f 100644 --- a/common/py_utils/py_utils/dependency_util.py +++ b/common/py_utils/py_utils/dependency_util.py @@ -40,7 +40,7 @@ def GetChromeApkOsVersion(version_name): 'First character of versions name %s was not an uppercase letter.') if version < 'L': return 'k' - elif version > 'M': + if version > 'M': return 'n' return 'l' diff --git a/common/py_utils/py_utils/discover.py b/common/py_utils/py_utils/discover.py index b9ab028a5c8..7eac0c545a8 100644 --- a/common/py_utils/py_utils/discover.py +++ b/common/py_utils/py_utils/discover.py @@ -175,6 +175,7 @@ def IsDirectlyConstructable(cls): # Case |class A(object): pass|. return True # Case |class (object):| with |__init__| other than |object.__init__|. 
+ # pylint: disable=deprecated-method args, _, _, defaults = inspect.getargspec(cls.__init__) if defaults is None: defaults = () diff --git a/common/py_utils/py_utils/discover_unittest.py b/common/py_utils/py_utils/discover_unittest.py index bdc50b2fff9..ec76df18142 100644 --- a/common/py_utils/py_utils/discover_unittest.py +++ b/common/py_utils/py_utils/discover_unittest.py @@ -113,26 +113,25 @@ def testDiscoverClassesWithPatternAndIndexByClassName(self): } self.assertEqual(actual_classes, expected_classes) - -class ClassWithoutInitDefOne: # pylint: disable=old-style-class, no-init +class ClassWithoutInitDefOne(): pass -class ClassWithoutInitDefTwo(object): +class ClassWithoutInitDefTwo(): pass -class ClassWhoseInitOnlyHasSelf(object): +class ClassWhoseInitOnlyHasSelf(): def __init__(self): pass -class ClassWhoseInitWithDefaultArguments(object): +class ClassWhoseInitWithDefaultArguments(): def __init__(self, dog=1, cat=None, cow=None, fud='a'): pass -class ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments(object): +class ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments(): def __init__(self, x, dog=1, cat=None, fish=None, fud='a'): pass diff --git a/common/py_utils/py_utils/exc_util_unittest.py b/common/py_utils/py_utils/exc_util_unittest.py index a2157e4f1c0..3b1f1574a19 100644 --- a/common/py_utils/py_utils/exc_util_unittest.py +++ b/common/py_utils/py_utils/exc_util_unittest.py @@ -6,7 +6,6 @@ import re import sys import unittest -import six from py_utils import exc_util @@ -27,7 +26,7 @@ class FakeCleanupError(Exception): pass -class FaultyClient(object): +class FaultyClient(): def __init__(self, *args): self.failures = set(args) self.called = set() @@ -56,18 +55,13 @@ def Cleanup(self): class ReraiseTests(unittest.TestCase): - def assertCountEqualPy23(self, expected, actual): - if six.PY2: - self.assertItemsEqual(expected, actual) - else: - self.assertCountEqual(expected, actual) # pylint: disable=no-member def assertLogMatches(self, pattern): - self.assertRegexpMatches( + self.assertRegex( sys.stderr.getvalue(), pattern) # pylint: disable=no-member def assertLogNotMatches(self, pattern): - self.assertNotRegexpMatches( + self.assertNotRegex( sys.stderr.getvalue(), pattern) # pylint: disable=no-member def testTryRaisesExceptRaises(self): @@ -86,7 +80,7 @@ def testTryRaisesExceptRaises(self): r'While handling a FakeConnectionError, .* was also raised:\n' r'.*' r'FakeDisconnectionError: Oops!\n', re.DOTALL)) - self.assertCountEqualPy23(client.called, ['Connect', 'Disconnect']) + self.assertCountEqual(client.called, ['Connect', 'Disconnect']) def testTryRaisesExceptDoesnt(self): client = FaultyClient(FakeConnectionError) @@ -101,7 +95,7 @@ def testTryRaisesExceptDoesnt(self): raise self.assertLogNotMatches('FakeDisconnectionError') - self.assertCountEqualPy23(client.called, ['Connect', 'Disconnect']) + self.assertCountEqual(client.called, ['Connect', 'Disconnect']) def testTryPassesNoException(self): client = FaultyClient(FakeDisconnectionError) @@ -116,7 +110,7 @@ def testTryPassesNoException(self): self.assertLogNotMatches('FakeConnectionError') self.assertLogNotMatches('FakeDisconnectionError') - self.assertCountEqualPy23(client.called, ['Connect']) + self.assertCountEqual(client.called, ['Connect']) def testTryRaisesFinallyRaises(self): worker = FaultyClient(FakeProcessingError, FakeCleanupError) @@ -125,8 +119,6 @@ def testTryRaisesFinallyRaises(self): with self.assertRaises(FakeProcessingError): try: worker.Process() - except: - raise # Needed for Cleanup to know if 
an exception is handled. finally: worker.Cleanup() @@ -134,7 +126,7 @@ def testTryRaisesFinallyRaises(self): r'While handling a FakeProcessingError, .* was also raised:\n' r'.*' r'FakeCleanupError: Oops!\n', re.DOTALL)) - self.assertCountEqualPy23(worker.called, ['Process', 'Cleanup']) + self.assertCountEqual(worker.called, ['Process', 'Cleanup']) def testTryRaisesFinallyDoesnt(self): worker = FaultyClient(FakeProcessingError) @@ -143,14 +135,12 @@ def testTryRaisesFinallyDoesnt(self): with self.assertRaises(FakeProcessingError): try: worker.Process() - except: - raise # Needed for Cleanup to know if an exception is handled. finally: worker.Cleanup() self.assertLogNotMatches('FakeProcessingError') self.assertLogNotMatches('FakeCleanupError') - self.assertCountEqualPy23(worker.called, ['Process', 'Cleanup']) + self.assertCountEqual(worker.called, ['Process', 'Cleanup']) def testTryPassesFinallyRaises(self): worker = FaultyClient(FakeCleanupError) @@ -160,14 +150,12 @@ def testTryPassesFinallyRaises(self): with self.assertRaises(FakeCleanupError): try: worker.Process() - except: - raise # Needed for Cleanup to know if an exception is handled. finally: worker.Cleanup() self.assertLogNotMatches('FakeProcessingError') self.assertLogNotMatches('FakeCleanupError') - self.assertCountEqualPy23(worker.called, ['Process', 'Cleanup']) + self.assertCountEqual(worker.called, ['Process', 'Cleanup']) def testTryRaisesExceptRaisesFinallyRaises(self): worker = FaultyClient( @@ -186,5 +174,5 @@ def testTryRaisesExceptRaisesFinallyRaises(self): self.assertLogMatches('FakeDisconnectionError') self.assertLogMatches('FakeCleanupError') - self.assertCountEqualPy23(worker.called, - ['Process', 'Disconnect', 'Cleanup']) + self.assertCountEqual(worker.called, + ['Process', 'Disconnect', 'Cleanup']) diff --git a/common/py_utils/py_utils/expectations_parser.py b/common/py_utils/py_utils/expectations_parser.py index 534b3526302..5d67c3030b5 100644 --- a/common/py_utils/py_utils/expectations_parser.py +++ b/common/py_utils/py_utils/expectations_parser.py @@ -13,7 +13,7 @@ class ParseError(Exception): pass -class Expectation(object): +class Expectation(): def __init__(self, reason, test, conditions, results): """Constructor for expectations. @@ -56,7 +56,7 @@ def results(self): return self._results -class TestExpectationParser(object): +class TestExpectationParser(): """Parse expectations data in TA/DA format. This parser covers the 'tagged' test lists format in: @@ -110,14 +110,14 @@ def _ParseExpectationLine(self, line_number, line, tags): % (line_number, line)) # Unused group is optional trailing comment. reason, raw_conditions, test, results, _ = match.groups() - conditions = [c for c in raw_conditions.split()] if raw_conditions else [] + conditions = list(raw_conditions.split()) if raw_conditions else [] for c in conditions: if c not in tags: raise ParseError( 'Condition %s not found in expectations tag data. 
Line %d' % (c, line_number)) - return Expectation(reason, test, conditions, [r for r in results.split()]) + return Expectation(reason, test, conditions, list(results.split())) @property def expectations(self): diff --git a/common/py_utils/py_utils/lock.py b/common/py_utils/py_utils/lock.py index 5c450693da2..7d8fdb5cf1c 100644 --- a/common/py_utils/py_utils/lock.py +++ b/common/py_utils/py_utils/lock.py @@ -5,6 +5,7 @@ from __future__ import absolute_import # pylint: disable=wrong-import-position import contextlib import os +import six LOCK_EX = None # Exclusive lock LOCK_SH = None # Shared lock @@ -87,10 +88,10 @@ def _LockImplWin(target_file, flags): win32file.LockFileEx(hfile, flags, 0, -0x10000, _OVERLAPPED) except pywintypes.error as exc_value: if exc_value.args[0] == 33: - raise LockException('Error trying acquiring lock of %s: %s' % - (target_file.name, exc_value.args[2])) - else: - raise + six.raise_from(LockException('Error trying acquiring lock of %s: %s' % + (target_file.name, exc_value.args[2])), + exc_value) + raise def _UnlockImplWin(target_file): @@ -112,10 +113,10 @@ def _LockImplPosix(target_file, flags): fcntl.flock(target_file.fileno(), flags) except IOError as exc_value: if exc_value.args[0] == 11 or exc_value.args[0] == 35: - raise LockException('Error trying acquiring lock of %s: %s' % - (target_file.name, exc_value.args[1])) - else: - raise + six.raise_from(LockException('Error trying acquiring lock of %s: %s' % + (target_file.name, exc_value.args[1])), + exc_value) + raise def _UnlockImplPosix(target_file): diff --git a/common/py_utils/py_utils/lock_unittest.py b/common/py_utils/py_utils/lock_unittest.py index 2ba288bd14a..ef2a4150af9 100644 --- a/common/py_utils/py_utils/lock_unittest.py +++ b/common/py_utils/py_utils/lock_unittest.py @@ -73,8 +73,8 @@ def testExclusiveLock(self): # file content as below. expected_file_content = ''.join((['Start'] + ['*']*10000 + ['End']) * 10) with open(self.temp_file_path, 'r') as f: - # Use assertTrue instead of assertEquals since the strings are big, hence - # assertEquals's assertion failure will contain huge strings. + # Use assertTrue instead of assertEqual since the strings are big, hence + # assertEqual's assertion failure will contain huge strings. self.assertTrue(expected_file_content == f.read()) def testSharedLock(self): @@ -102,7 +102,7 @@ def testSharedLock(self): # temp_write_file should contains 10 copy of temp_file_path's content. with open(temp_write_file, 'r') as f: - self.assertEquals('0123456789'*10, f.read()) + self.assertEqual('0123456789'*10, f.read()) finally: os.remove(temp_write_file) @@ -119,7 +119,7 @@ def testNonBlockingLockAcquiring(self): p.start() p.join() with open(temp_status_file, 'r') as f: - self.assertEquals('LockException raised', f.read()) + self.assertEqual('LockException raised', f.read()) finally: os.remove(temp_status_file) @@ -137,7 +137,7 @@ def testUnlockBeforeClosingFile(self): p.start() p.join() with open(temp_status_file, 'r') as f: - self.assertEquals('LockException was not raised', f.read()) + self.assertEqual('LockException was not raised', f.read()) finally: os.remove(temp_status_file) @@ -156,7 +156,7 @@ def testContextualLock(self): p.start() p.join() with open(temp_status_file, 'r') as f: - self.assertEquals('LockException raised', f.read()) + self.assertEqual('LockException raised', f.read()) # Accessing self.temp_file_path here should not raise exception. 
p = multiprocessing.Process( @@ -165,6 +165,6 @@ def testContextualLock(self): p.start() p.join() with open(temp_status_file, 'r') as f: - self.assertEquals('LockException was not raised', f.read()) + self.assertEqual('LockException was not raised', f.read()) finally: os.remove(temp_status_file) diff --git a/common/py_utils/py_utils/memory_debug.py b/common/py_utils/py_utils/memory_debug.py index a5e5d006303..bca86cccdb4 100755 --- a/common/py_utils/py_utils/memory_debug.py +++ b/common/py_utils/py_utils/memory_debug.py @@ -27,8 +27,7 @@ def GetValueAndUnit(value): if value is not None: return '%.1f %s' % GetValueAndUnit(value) - else: - return 'N/A' + return 'N/A' def _GetProcessInfo(p): diff --git a/common/py_utils/py_utils/modules_util_unittest.py b/common/py_utils/py_utils/modules_util_unittest.py index ad3fbdfb16e..4ff02f2cd48 100644 --- a/common/py_utils/py_utils/modules_util_unittest.py +++ b/common/py_utils/py_utils/modules_util_unittest.py @@ -7,7 +7,7 @@ from py_utils import modules_util -class FakeModule(object): +class FakeModule(): def __init__(self, name, version): self.__name__ = name self.__version__ = version diff --git a/common/py_utils/py_utils/py_utils_unittest.py b/common/py_utils/py_utils/py_utils_unittest.py index 886965d9295..4831a8ce323 100644 --- a/common/py_utils/py_utils/py_utils_unittest.py +++ b/common/py_utils/py_utils/py_utils_unittest.py @@ -54,4 +54,3 @@ def testWaitForTrueLambda(self): def testWaitForFalseLambda(self): with self.assertRaises(py_utils.TimeoutException): py_utils.WaitFor(lambda: False, .1) - diff --git a/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py b/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py index bdaec61b3ae..716936d14d0 100644 --- a/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py +++ b/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py @@ -13,20 +13,20 @@ class AnnotatedSymbol(snippet.Symbol): def __init__(self, symbol_type, children): - super(AnnotatedSymbol, self).__init__(symbol_type, children) + super().__init__(symbol_type, children) self._modified = False @property def modified(self): if self._modified: return True - return super(AnnotatedSymbol, self).modified + return super().modified def __setattr__(self, name, value): if (hasattr(self.__class__, name) and isinstance(getattr(self.__class__, name), property)): self._modified = True - return super(AnnotatedSymbol, self).__setattr__(name, value) + return super().__setattr__(name, value) def Cut(self, child): for i in range(len(self._children)): diff --git a/common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py b/common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py index b9b121fa4af..cad50879f13 100644 --- a/common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py +++ b/common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py @@ -24,13 +24,11 @@ def Annotate(cls, symbol_type, children): return None statement = compound_statement.children[0] - if statement.type == symbol.classdef: - return cls(statement.type, statement.children) - elif (statement.type == symbol.decorated and + if statement.type == symbol.classdef or \ + (statement.type == symbol.decorated and statement.children[-1].type == symbol.classdef): return cls(statement.type, statement.children) - else: - return None + return None @property def suite(self): diff --git a/common/py_utils/py_utils/refactor/annotated_symbol/function_definition.py 
b/common/py_utils/py_utils/refactor/annotated_symbol/function_definition.py index 39389386e8a..52719fe68dc 100644 --- a/common/py_utils/py_utils/refactor/annotated_symbol/function_definition.py +++ b/common/py_utils/py_utils/refactor/annotated_symbol/function_definition.py @@ -24,13 +24,12 @@ def Annotate(cls, symbol_type, children): return None statement = compound_statement.children[0] - if statement.type == symbol.funcdef: + if statement.type == symbol.funcdef or \ + (statement.type == symbol.decorated and + statement.children[-1].type == symbol.funcdef): return cls(statement.type, statement.children) - elif (statement.type == symbol.decorated and - statement.children[-1].type == symbol.funcdef): - return cls(statement.type, statement.children) - else: - return None + + return None @property def suite(self): diff --git a/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py b/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py index 54a3935ca05..9ebdad54077 100644 --- a/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py +++ b/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py @@ -62,8 +62,8 @@ def value(self, value): class AsName(base_symbol.AnnotatedSymbol): @classmethod def Annotate(cls, symbol_type, children): - if (symbol_type != symbol.dotted_as_name and - symbol_type != symbol.import_as_name): + if symbol_type not in \ + (symbol.dotted_as_name, symbol.import_as_name): return None return cls(symbol_type, children) @@ -218,8 +218,7 @@ def alias(self, value): # pylint: disable=arguments-differ def name(self): if self.alias: return self.alias - else: - return self.path + return self.path class ImportFrom(Import): @@ -281,8 +280,7 @@ def module(self): import_as_name = self._import_as_name if import_as_name: return import_as_name.name - else: - return '*' + return '*' @module.setter def module(self, value): @@ -293,12 +291,11 @@ def module(self, value): if value == '*': # TODO: Implement this. raise NotImplementedError() + if import_as_name: + import_as_name.name = value else: - if import_as_name: - import_as_name.name = value - else: - # TODO: Implement this. - raise NotImplementedError() + # TODO: Implement this. 
+ raise NotImplementedError() @property def path(self): @@ -313,8 +310,7 @@ def alias(self): import_as_name = self._import_as_name if import_as_name: return import_as_name.alias - else: - return None + return None @alias.setter def alias(self, value): # pylint: disable=arguments-differ @@ -327,5 +323,4 @@ def alias(self, value): # pylint: disable=arguments-differ def name(self): if self.alias: return self.alias - else: - return self.module + return self.module diff --git a/common/py_utils/py_utils/refactor/annotated_symbol/reference.py b/common/py_utils/py_utils/refactor/annotated_symbol/reference.py index 493176e65a4..6d51d9729ee 100644 --- a/common/py_utils/py_utils/refactor/annotated_symbol/reference.py +++ b/common/py_utils/py_utils/refactor/annotated_symbol/reference.py @@ -47,7 +47,7 @@ def Annotate(cls, nodes): return [cls(nodes[:i])] + nodes[i:] def __init__(self, children): - super(Reference, self).__init__(-1, children) + super().__init__(-1, children) @property def type_name(self): diff --git a/common/py_utils/py_utils/refactor/module.py b/common/py_utils/py_utils/refactor/module.py index 8b1b0dc0a51..c01dba459dd 100644 --- a/common/py_utils/py_utils/refactor/module.py +++ b/common/py_utils/py_utils/refactor/module.py @@ -6,7 +6,7 @@ from py_utils.refactor import annotated_symbol -class Module(object): +class Module(): def __init__(self, file_path): self._file_path = file_path diff --git a/common/py_utils/py_utils/refactor/offset_token.py b/common/py_utils/py_utils/refactor/offset_token.py index deca085879a..3c2cf73ac85 100644 --- a/common/py_utils/py_utils/refactor/offset_token.py +++ b/common/py_utils/py_utils/refactor/offset_token.py @@ -20,7 +20,7 @@ def _Pairwise(iterable): return zip(a, b) -class OffsetToken(object): +class OffsetToken(): """A Python token with a relative position. A token is represented by a type defined in Python's token module, a string diff --git a/common/py_utils/py_utils/refactor/snippet.py b/common/py_utils/py_utils/refactor/snippet.py index 76d62bb539d..f9fa54afa45 100644 --- a/common/py_utils/py_utils/refactor/snippet.py +++ b/common/py_utils/py_utils/refactor/snippet.py @@ -14,7 +14,7 @@ from py_utils.refactor import offset_token -class Snippet(object): +class Snippet(): """A node in the Python parse tree. The Python grammar is defined at: @@ -71,7 +71,7 @@ def FindChild(self, snippet_type, **kwargs): if not isinstance(child, snippet_type): continue - for attribute, value in kwargs: + for attribute, value in kwargs.items(): if getattr(child, attribute) != value: break else: @@ -224,24 +224,23 @@ def _SnippetizeNode(node, tokens): # Symbol. children = tuple(_SnippetizeNode(child, tokens) for child in node[1:]) return Symbol(node_type, children) - else: - # Token. - grabbed_tokens = [] - while tokens and ( - tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL): - grabbed_tokens.append(tokens.popleft()) - - # parser has 2 NEWLINEs right before the end. - # tokenize has 0 or 1 depending on if the file has one. - # Create extra nodes without consuming tokens to account for this. - if node_type == token.NEWLINE: - for tok in tokens: - if tok.type == token.ENDMARKER: - return TokenSnippet(node_type, grabbed_tokens) - if tok.type != token.DEDENT: - break + # Token. + grabbed_tokens = [] + while tokens and ( + tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL): + grabbed_tokens.append(tokens.popleft()) - assert tokens[0].type == token.OP or node_type == tokens[0].type + # parser has 2 NEWLINEs right before the end. 
+ # tokenize has 0 or 1 depending on if the file has one. + # Create extra nodes without consuming tokens to account for this. + if node_type == token.NEWLINE: + for tok in tokens: + if tok.type == token.ENDMARKER: + return TokenSnippet(node_type, grabbed_tokens) + if tok.type != token.DEDENT: + break - grabbed_tokens.append(tokens.popleft()) - return TokenSnippet(node_type, grabbed_tokens) + assert tokens[0].type == token.OP or node_type == tokens[0].type + + grabbed_tokens.append(tokens.popleft()) + return TokenSnippet(node_type, grabbed_tokens) diff --git a/common/py_utils/py_utils/refactor_util/move.py b/common/py_utils/py_utils/refactor_util/move.py index 72c65a19029..12beb16c34f 100644 --- a/common/py_utils/py_utils/refactor_util/move.py +++ b/common/py_utils/py_utils/refactor_util/move.py @@ -41,7 +41,7 @@ def _Update(moves, module): print('Error updating %s: %s' % (module.file_path, e), file=sys.stderr) -class _Move(object): +class _Move(): def __init__(self, source, target): self._source_path = os.path.realpath(source) diff --git a/common/py_utils/py_utils/retry_util_unittest.py b/common/py_utils/py_utils/retry_util_unittest.py index 113b9b1806c..b28a0c8cd3b 100644 --- a/common/py_utils/py_utils/retry_util_unittest.py +++ b/common/py_utils/py_utils/retry_util_unittest.py @@ -77,8 +77,7 @@ def Test(retries=None): self.num_calls += 1 if self.num_calls < 3: raise KeyError('oops!') - else: - return 'OK!' + return 'OK!' # The value is returned after the expected number of calls. self.assertEqual(Test(), 'OK!') @@ -91,8 +90,7 @@ def Test(retries=None): self.num_calls += 1 if self.num_calls < 3: raise KeyError('oops!') - else: - return 'OK!' + return 'OK!' # We fail immediately on the first try. with self.assertRaises(KeyError): @@ -106,10 +104,9 @@ def Test(retries=None): self.num_calls += 1 if self.num_calls == 1: raise KeyError('oops!') - elif self.num_calls == 2: + if self.num_calls == 2: raise ValueError('uh oh!') - else: - return 'OK!' + return 'OK!' # Call eventually succeeds after enough tries. self.assertEqual(Test(retries=5), 'OK!') diff --git a/common/py_utils/py_utils/slots_metaclass.py b/common/py_utils/py_utils/slots_metaclass.py index ae36c6778d9..53227d1d2c5 100644 --- a/common/py_utils/py_utils/slots_metaclass.py +++ b/common/py_utils/py_utils/slots_metaclass.py @@ -20,8 +20,8 @@ class contains. add '__dict__' to its __slots__. 
""" - def __new__(mcs, name, bases, attrs): + def __new__(cls, name, bases, attrs): assert '__slots__' in attrs, 'Class "%s" must define __slots__' % name assert isinstance(attrs['__slots__'], tuple), '__slots__ must be a tuple' - return super(SlotsMetaclass, mcs).__new__(mcs, name, bases, attrs) + return super(SlotsMetaclass, cls).__new__(cls, name, bases, attrs) diff --git a/common/py_utils/py_utils/slots_metaclass_unittest.py b/common/py_utils/py_utils/slots_metaclass_unittest.py index 702371a79a4..38b9da0c0aa 100644 --- a/common/py_utils/py_utils/slots_metaclass_unittest.py +++ b/common/py_utils/py_utils/slots_metaclass_unittest.py @@ -18,28 +18,19 @@ class SlotsMetaclassUnittest(unittest.TestCase): def testSlotsMetaclass(self): class NiceClass(six.with_metaclass(slots_metaclass.SlotsMetaclass, object)): - __slots__ = '_nice', + __slots__ = ('_nice',) def __init__(self, nice): self._nice = nice NiceClass(42) - with self.assertRaises(AssertionError): - class NaughtyClass(NiceClass): - def __init__(self, naughty): - super(NaughtyClass, self).__init__(42) - self._naughty = naughty - - # Metaclasses are called when the class is defined, so no need to - # instantiate it. - with self.assertRaises(AttributeError): class NaughtyClass2(NiceClass): __slots__ = () def __init__(self, naughty): - super(NaughtyClass2, self).__init__(42) + super().__init__(42) self._naughty = naughty # pylint: disable=assigning-non-slot # SlotsMetaclass is happy that __slots__ is defined, but python won't be diff --git a/common/py_utils/py_utils/tempfile_ext_unittest.py b/common/py_utils/py_utils/tempfile_ext_unittest.py index 0f9e2da2524..194c01bcdf2 100644 --- a/common/py_utils/py_utils/tempfile_ext_unittest.py +++ b/common/py_utils/py_utils/tempfile_ext_unittest.py @@ -38,7 +38,7 @@ def testDir(self): test_dir = '/baz' self.fs.CreateDirectory(test_dir) with tempfile_ext.NamedTemporaryDirectory(dir=test_dir) as d: - self.assertEquals(test_dir, os.path.dirname(d)) + self.assertEqual(test_dir, os.path.dirname(d)) class TemporaryFilesTest(fake_filesystem_unittest.TestCase): diff --git a/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py b/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py index d5b8ff5f71f..29554da92df 100644 --- a/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py +++ b/common/py_utils/py_utils/test_data/discoverable_classes/another_discover_dummyclass.py @@ -15,20 +15,20 @@ class _PrivateDummyException(discover_dummyclass.DummyException): def __init__(self): - super(_PrivateDummyException, self).__init__() + discover_dummyclass.DummyException.__init__(self) class DummyExceptionImpl1(_PrivateDummyException): def __init__(self): - super(DummyExceptionImpl1, self).__init__() + _PrivateDummyException.__init__(self) class DummyExceptionImpl2(_PrivateDummyException): def __init__(self): - super(DummyExceptionImpl2, self).__init__() + _PrivateDummyException.__init__(self) class DummyExceptionWithParameterImpl1(_PrivateDummyException): def __init__(self, parameter): - super(DummyExceptionWithParameterImpl1, self).__init__() + super().__init__() del parameter diff --git a/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py b/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py index 15dcb35a4d5..239651fa5bb 100644 --- a/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py +++ 
b/common/py_utils/py_utils/test_data/discoverable_classes/discover_dummyclass.py @@ -6,4 +6,4 @@ class DummyException(Exception): def __init__(self): - super(DummyException, self).__init__() + Exception.__init__(self) diff --git a/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py b/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py index 0287b6481d9..cd48971f622 100644 --- a/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py +++ b/common/py_utils/py_utils/test_data/discoverable_classes/parameter_discover_dummyclass.py @@ -8,5 +8,5 @@ class DummyExceptionWithParameterImpl2(discover_dummyclass.DummyException): def __init__(self, parameter1, parameter2): - super(DummyExceptionWithParameterImpl2, self).__init__() + super().__init__() del parameter1, parameter2 diff --git a/common/py_utils/py_utils/ts_proxy_server.py b/common/py_utils/py_utils/ts_proxy_server.py index 652eb38310c..5b642cdaccb 100644 --- a/common/py_utils/py_utils/ts_proxy_server.py +++ b/common/py_utils/py_utils/ts_proxy_server.py @@ -13,6 +13,7 @@ import subprocess import sys import time +import six try: import fcntl @@ -28,7 +29,6 @@ class TsProxyServerError(Exception): """Catch-all exception for tsProxy Server.""" - pass def ParseTsProxyPortFromOutput(output_line): port_re = re.compile( @@ -38,9 +38,10 @@ def ParseTsProxyPortFromOutput(output_line): m = port_re.match(output_line) if m: return int(m.group('port')) + return None -class TsProxyServer(object): +class TsProxyServer(): """Start and stop tsproxy. TsProxy provides basic latency, download and upload traffic shaping. This @@ -90,10 +91,13 @@ def StartServer(self, timeout=10, retries=None): cmd_line.append( '--mapports=443:%s,*:%s' % (self._https_port, self._http_port)) logging.info('Tsproxy commandline: %s', cmd_line) - # In python3 subprocess handles encoding/decoding; this warns if it won't be UTF-8. + # In python3 subprocess handles encoding/decoding; this warns if it won't + # be UTF-8. if locale.getpreferredencoding() != 'UTF-8': - logging.warn('Decoding will use %s instead of UTF-8', locale.getpreferredencoding()) - # In python3 universal_newlines forces subprocess to encode/decode, allowing per-line buffering. + logging.warning('Decoding will use %s instead of UTF-8', + locale.getpreferredencoding()) + # In python3 universal_newlines forces subprocess to encode/decode, + # allowing per-line buffering. 
self._proc = subprocess.Popen( cmd_line, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True) @@ -115,8 +119,8 @@ def StartServer(self, timeout=10, retries=None): err = self.StopServer() if err: logging.error('Error stopping WPR server:\n%s', err) - raise TsProxyServerError( - 'Error starting tsproxy: timed out after %s seconds' % timeout) + six.raise_from(TsProxyServerError( + 'Error starting tsproxy: timed out after %s seconds' % timeout), None) def _IsStarted(self): assert not self._is_running @@ -127,7 +131,7 @@ def _IsStarted(self): output_line = self._ReadLineTsProxyStdout(timeout=5) logging.debug('TsProxy output: %s', output_line) self._port = ParseTsProxyPortFromOutput(output_line) - return self._port != None + return self._port is not None def _ReadLineTsProxyStdout(self, timeout): def ReadSingleLine(): @@ -158,7 +162,8 @@ def CommandStatusIsRead(): logging.log(logging.DEBUG if success else logging.ERROR, 'TsProxy output:\n%s', '\n'.join(command_output)) if not success: - raise TsProxyServerError('Failed to execute command: %s', command_string) + six.raise_from(TsProxyServerError('Failed to execute command: %s', + command_string), None) def UpdateOutboundPorts(self, http_port, https_port, timeout=5): assert http_port and https_port @@ -196,9 +201,9 @@ def StopServer(self): """Stop TsProxy Server.""" if not self._is_running: logging.debug('Attempting to stop TsProxy server that is not running.') - return + return None if not self._proc: - return + return None try: self._IssueCommand('exit', timeout=10) py_utils.WaitFor(lambda: self._proc.poll() is not None, 10) diff --git a/common/py_utils/py_utils/ts_proxy_server_unittest.py b/common/py_utils/py_utils/ts_proxy_server_unittest.py index ef229e32ea8..cbc398f9f03 100644 --- a/common/py_utils/py_utils/ts_proxy_server_unittest.py +++ b/common/py_utils/py_utils/ts_proxy_server_unittest.py @@ -9,15 +9,15 @@ class TsProxyServerTest(unittest.TestCase): def testParseTsProxyPort(self): - self.assertEquals( + self.assertEqual( ts_proxy_server.ParseTsProxyPortFromOutput( 'Started Socks5 proxy server on 127.0.0.1:54430 \n'), 54430) - self.assertEquals( + self.assertEqual( ts_proxy_server.ParseTsProxyPortFromOutput( 'Started Socks5 proxy server on foo.bar.com:430 \n'), 430) - self.assertEquals( + self.assertEqual( ts_proxy_server.ParseTsProxyPortFromOutput( 'Failed to start sock5 proxy.'), None) @@ -45,13 +45,13 @@ def testSmokeUpdateTrafficSettings(self): server.UpdateTrafficSettings(download_bandwidth_kbps=5000) server.UpdateTrafficSettings(upload_bandwidth_kbps=2000) - self.assertEquals(server._rtt, 100) - self.assertEquals(server._inbkps, 5000) - self.assertEquals(server._outkbps, 2000) + self.assertEqual(server._rtt, 100) + self.assertEqual(server._inbkps, 5000) + self.assertEqual(server._outkbps, 2000) server.UpdateTrafficSettings( round_trip_latency_ms=200, download_bandwidth_kbps=500, upload_bandwidth_kbps=2000) - self.assertEquals(server._rtt, 200) - self.assertEquals(server._inbkps, 500) - self.assertEquals(server._outkbps, 2000) + self.assertEqual(server._rtt, 200) + self.assertEqual(server._inbkps, 500) + self.assertEqual(server._outkbps, 2000) diff --git a/common/py_utils/py_utils/webpagereplay_go_server.py b/common/py_utils/py_utils/webpagereplay_go_server.py index b2b11cc6e59..a3bc2aa5f74 100644 --- a/common/py_utils/py_utils/webpagereplay_go_server.py +++ b/common/py_utils/py_utils/webpagereplay_go_server.py @@ -36,8 +36,6 @@ class ReplayError(Exception): """Catch-all exception 
for the module.""" - pass - class ReplayNotFoundError(ReplayError): def __init__(self, label, path): @@ -50,7 +48,7 @@ def __init__(self, label, path): path: A string of the path in this error. """ - super(ReplayNotFoundError, self).__init__() + super().__init__() self.args = (label, path) def __str__(self): @@ -62,7 +60,7 @@ class ReplayNotStartedError(ReplayError): pass -class ReplayServer(object): +class ReplayServer(): """Start and Stop Web Page Replay. Web Page Replay is a proxy that can record and "replay" web pages with @@ -140,7 +138,7 @@ def _GetGoBinaryPath(self, replay_options): print(subprocess.check_output( ['go', 'build', os.path.join(go_folder, 'wpr.go')])) except subprocess.CalledProcessError: - exit(1) + sys.exit(1) os.chdir(cur_cwd) return os.path.join(go_folder, 'wpr') @@ -309,6 +307,7 @@ def StartServer(self): logging.info('Starting Web-Page-Replay: %s', self._cmd_line) self._CreateTempLogFilePath() with self._OpenLogFile() as log_fh: + # pylint: disable=subprocess-popen-preexec-fn self.replay_process = subprocess.Popen( self._cmd_line, stdout=log_fh, stderr=subprocess.STDOUT, preexec_fn=(_ResetInterruptHandler if is_posix else None)) @@ -319,9 +318,12 @@ def StartServer(self): logging.info('WPR ports: %s', self._started_ports) atexit_with_log.Register(self.StopServer) return dict(self._started_ports) - except Exception: + except Exception: # pylint: disable=broad-except self.StopServer(logging.ERROR) - raise ReplayNotStartedError('Web Page Replay failed to start.') + six.raise_from(ReplayNotStartedError('Web Page Replay failed to start.'), + None) + + return None def _IsReplayProcessStarted(self): if not self.replay_process: @@ -385,12 +387,14 @@ def _CreateTempLogFilePath(self): def _CleanUpTempLogFilePath(self, log_level): if not self._temp_log_file_path: return '' - if logging.getLogger('').isEnabledFor(log_level) or USE_LOCAL_WPR in self._replay_options: + if logging.getLogger('').isEnabledFor(log_level) or USE_LOCAL_WPR \ + in self._replay_options: with open(self._temp_log_file_path, 'r') as f: wpr_log_output = f.read() - output = ('************************** WPR LOG *****************************\n' + + asterisk_str = '**************************' + output = (asterisk_str + ' WPR LOG ' + asterisk_str + '\n' + '\n'.join(wpr_log_output.split('\n')) + - '************************** END OF WPR LOG **********************') + asterisk_str + ' END OF WPR LOG ' + asterisk_str) if logging.getLogger('').isEnabledFor(log_level): logging.log(log_level, output) else: @@ -398,6 +402,7 @@ def _CleanUpTempLogFilePath(self, log_level): os.remove(self._temp_log_file_path) self._temp_log_file_path = None + return None def __enter__(self): """Add support for with-statement.""" @@ -423,7 +428,8 @@ def _UrlOpen(self, url_path, protocol='http'): url = '%s://%s:%s/%s' % ( protocol, self._replay_host, self._started_ports[protocol], url_path) - return six.moves.urllib.request.FancyURLopener({}).open(url) # pylint: disable=no-member + # pylint: disable=no-member + return six.moves.urllib.request.FancyURLopener({}).open(url) def _ResetInterruptHandler(): """Reset the interrupt handler back to the default. diff --git a/common/py_vulcanize/README.chromium b/common/py_vulcanize/README.chromium index 0b32761b781..128566e467c 100644 --- a/common/py_vulcanize/README.chromium +++ b/common/py_vulcanize/README.chromium @@ -1,6 +1,7 @@ Name: py_vulcanize URL: N/A Version: N/A +Shipped: yes Description: Py-vulcanize, formerly known as TVCM (trace-viewer component model). 
diff --git a/common/py_vulcanize/bin/run_py_tests b/common/py_vulcanize/bin/run_py_tests index 904c2138b5f..6ef68ff224b 100755 --- a/common/py_vulcanize/bin/run_py_tests +++ b/common/py_vulcanize/bin/run_py_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython3 # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/common/py_vulcanize/py_vulcanize/fake_fs.py b/common/py_vulcanize/py_vulcanize/fake_fs.py index f4e3512b395..a26b92fc86a 100644 --- a/common/py_vulcanize/py_vulcanize/fake_fs.py +++ b/common/py_vulcanize/py_vulcanize/fake_fs.py @@ -6,16 +6,14 @@ from __future__ import division from __future__ import print_function +import builtins import codecs import collections import os import six -import sys from io import BytesIO -if six.PY3: - import builtins class WithableStringIO(six.StringIO): @@ -43,10 +41,7 @@ def __init__(self, initial_filenames_and_contents=None): self._bound = False self._real_codecs_open = codecs.open - if six.PY3: - self._real_open = builtins.open - else: - self._real_open = sys.modules['__builtin__'].open + self._real_open = builtins.open self._real_abspath = os.path.abspath self._real_exists = os.path.exists @@ -63,10 +58,7 @@ def __exit__(self, *args): def Bind(self): assert not self._bound codecs.open = self._FakeCodecsOpen - if six.PY3: - builtins.open = self._FakeOpen - else: - sys.modules['__builtin__'].open = self._FakeOpen + builtins.open = self._FakeOpen os.path.abspath = self._FakeAbspath os.path.exists = self._FakeExists os.walk = self._FakeWalk @@ -76,10 +68,7 @@ def Bind(self): def Unbind(self): assert self._bound codecs.open = self._real_codecs_open - if six.PY3: - builtins.open = self._real_open - else: - sys.modules['__builtin__'].open = self._real_open + builtins.open = self._real_open os.path.abspath = self._real_abspath os.path.exists = self._real_exists os.walk = self._real_walk diff --git a/common/py_vulcanize/py_vulcanize/html_generation_controller.py b/common/py_vulcanize/py_vulcanize/html_generation_controller.py index 991652cc1f7..d10537aa263 100644 --- a/common/py_vulcanize/py_vulcanize/html_generation_controller.py +++ b/common/py_vulcanize/py_vulcanize/html_generation_controller.py @@ -18,7 +18,7 @@ def GetHTMLForStylesheetHRef(self, href): # pylint: disable=unused-argument def GetHTMLForInlineStylesheet(self, contents): if self.current_module is None: - if re.search('url\(.+\)', contents): + if re.search(r'url\(.+\)', contents): raise Exception( 'Default HTMLGenerationController cannot handle inline style urls') return contents diff --git a/common/py_vulcanize/py_vulcanize/html_module_unittest.py b/common/py_vulcanize/py_vulcanize/html_module_unittest.py index b6d9f93a5ee..25de94f5e8e 100644 --- a/common/py_vulcanize/py_vulcanize/html_module_unittest.py +++ b/common/py_vulcanize/py_vulcanize/html_module_unittest.py @@ -5,7 +5,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -from io import BytesIO import os import unittest @@ -18,9 +17,9 @@ from py_vulcanize import project as project_module from py_vulcanize import resource from py_vulcanize import resource_loader as resource_loader +import functools import six -if six.PY3: - import functools + class ResourceWithFakeContents(resource.Resource): @@ -59,10 +58,9 @@ def FindResourceGivenAbsolutePath(self, absolute_path): return None # Sort by length. Longest match wins. 
- if six.PY3: - sorted(candidate_paths, key=functools.cmp_to_key(lambda x, y: len(x) - len(y)), reverse=True) - else: - candidate_paths.sort(lambda x, y: len(x) - len(y)) + candidate_paths = sorted( + candidate_paths, key=functools.cmp_to_key(lambda x, y: len(x) - len(y))) + + longest_candidate = candidate_paths[-1] return ResourceWithFakeContents( diff --git a/common/py_vulcanize/py_vulcanize/js_utils.py b/common/py_vulcanize/py_vulcanize/js_utils.py index 6e6ca9db674..c03187c5cda 100644 --- a/common/py_vulcanize/py_vulcanize/js_utils.py +++ b/common/py_vulcanize/py_vulcanize/js_utils.py @@ -4,4 +4,4 @@ def EscapeJSIfNeeded(js): - return js.replace('</script>', '<\/script>') + return js.replace('</script>', r'<\/script>') diff --git a/common/py_vulcanize/py_vulcanize/parse_html_deps.py b/common/py_vulcanize/py_vulcanize/parse_html_deps.py index 5fee1774251..19d7ef6470c 100644 --- a/common/py_vulcanize/py_vulcanize/parse_html_deps.py +++ b/common/py_vulcanize/py_vulcanize/parse_html_deps.py @@ -26,26 +26,20 @@ def _InitBeautifulSoup(): catapult_path = os.path.abspath( os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir)) - if six.PY3: - # Filter out warnings related to soupsieve from beautifulsoup. - # We do not need it and it generates unnecessary warnings during build. - warnings.filterwarnings('ignore', message='.*soupsieve.*', - category=UserWarning, module='bs4') - bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4-4.9.3', 'py3k') - else: - bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4') + # Filter out warnings related to soupsieve from beautifulsoup. + # We do not need it and it generates unnecessary warnings during build. + warnings.filterwarnings('ignore', message='.*soupsieve.*', + category=UserWarning, module='bs4') + bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4-4.9.3', + 'py3k') _AddToPathIfNeeded(bs_path) - if six.PY3: - html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-1.1') - else: - html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-python') + html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-1.1') _AddToPathIfNeeded(html5lib_path) - if six.PY3: - webencodings_path = os.path.join( - catapult_path, 'third_party', 'webencodings-0.5.1') - _AddToPathIfNeeded(webencodings_path) + webencodings_path = os.path.join( + catapult_path, 'third_party', 'webencodings-0.5.1') + _AddToPathIfNeeded(webencodings_path) six_path = os.path.join(catapult_path, 'third_party', 'six') _AddToPathIfNeeded(six_path) @@ -299,6 +293,6 @@ def Parse(self, html): html = '' else: if html.find('< /script>') != -1: - raise Exception('Escape script tags with <\/script>') + raise Exception(r'Escape script tags with <\/script>') return HTMLModuleParserResults(html) diff --git a/common/py_vulcanize/py_vulcanize/resource_loader.py b/common/py_vulcanize/py_vulcanize/resource_loader.py index 853069515c3..652fa6c1b96 100644 --- a/common/py_vulcanize/py_vulcanize/resource_loader.py +++ b/common/py_vulcanize/py_vulcanize/resource_loader.py @@ -6,11 +6,9 @@ from __future__ import absolute_import import codecs +import functools import os import six -import sys -if six.PY3: - import functools from py_vulcanize import module from py_vulcanize import style_sheet as style_sheet_module @@ -64,10 +62,8 @@ def FindResourceGivenAbsolutePath(self, absolute_path, binary=False): return None # Sort by length. Longest match wins. 
- if six.PY3: - sorted(candidate_paths, key=functools.cmp_to_key(lambda x, y: len(x) - len(y)), reverse=True) - else: - candidate_paths.sort(lambda x, y: len(x) - len(y)) + candidate_paths = sorted( + candidate_paths, key=functools.cmp_to_key(lambda x, y: len(x) - len(y))) longest_candidate = candidate_paths[-1] return resource_module.Resource(longest_candidate, absolute_path, binary) diff --git a/common/py_vulcanize/py_vulcanize/style_sheet.py b/common/py_vulcanize/py_vulcanize/style_sheet.py index 2ffc4ccfc3d..7a0fdc39095 100644 --- a/common/py_vulcanize/py_vulcanize/style_sheet.py +++ b/common/py_vulcanize/py_vulcanize/style_sheet.py @@ -60,7 +60,7 @@ def InlineUrl(m): return 'url(data:image/%s;base64,%s)' % (ext[1:], data.decode('utf-8')) # I'm assuming we only have url()'s associated with images - return re.sub('url\((?P<quote>"|\'|)(?P<url>[^"\'()]*)(?P=quote)\)', + return re.sub(r'url\((?P<quote>"|\'|)(?P<url>[^"\'()]*)(?P=quote)\)', InlineUrl, self.contents) def AppendDirectlyDependentFilenamesTo(self, dependent_filenames): @@ -72,7 +72,7 @@ def _Load(self, containing_dirname): raise Exception('@imports are not supported') matches = re.findall( - 'url\((?:["|\']?)([^"\'()]*)(?:["|\']?)\)', + r'url\((?:["|\']?)([^"\'()]*)(?:["|\']?)\)', self.contents) def resolve_url(url): diff --git a/common/py_vulcanize/third_party/rcssmin/README.chromium b/common/py_vulcanize/third_party/rcssmin/README.chromium index b1350fc9fa4..d167414aacc 100644 --- a/common/py_vulcanize/third_party/rcssmin/README.chromium +++ b/common/py_vulcanize/third_party/rcssmin/README.chromium @@ -3,8 +3,9 @@ Short Name: rcssmin URL: http://opensource.perlig.de/rcssmin/ Version: 1.0.5 License: Apache 2.0 -License File: NOT_SHIPPED +License File: LICENSE Security Critical: no +Shipped: no Description: rCSSmin is a CSS minifier written in python. diff --git a/common/py_vulcanize/third_party/rjsmin/README.chromium b/common/py_vulcanize/third_party/rjsmin/README.chromium index 256518fd917..d88bd581cf1 100644 --- a/common/py_vulcanize/third_party/rjsmin/README.chromium +++ b/common/py_vulcanize/third_party/rjsmin/README.chromium @@ -2,8 +2,9 @@ Short Name: rJSmin URL: http://opensource.perlig.de/rjsmin/ Version: 1.0.12 License: Apache 2.0 -License File: NOT_SHIPPED +License File: LICENSE Security Critical: no +Shipped: no Description: rJSmin is a javascript minifier written in python. diff --git a/dashboard/.coveragerc b/dashboard/.coveragerc new file mode 100644 index 00000000000..29976b3e5c2 --- /dev/null +++ b/dashboard/.coveragerc @@ -0,0 +1,7 @@ +[run] +# based on .gitignore +omit = + *pb2.py + +[expect_tests] +expected_coverage_min = 0 \ No newline at end of file diff --git a/dashboard/.gitignore b/dashboard/.gitignore deleted file mode 100644 index a52c547dea2..00000000000 --- a/dashboard/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Do not check in generated protocol buffers. -*_pb2.py diff --git a/dashboard/.style.yapf b/dashboard/.style.yapf index 30eb9b004e1..b5eb7e5d5f5 100644 --- a/dashboard/.style.yapf +++ b/dashboard/.style.yapf @@ -1,5 +1,5 @@ [style] -based_on_style = chromium +based_on_style = yapf spaces_before_comment = 2 split_before_logical_operator = true diff --git a/dashboard/.vpython3 b/dashboard/.vpython3 new file mode 100644 index 00000000000..88722eceb75 --- /dev/null +++ b/dashboard/.vpython3 @@ -0,0 +1,268 @@ +# This is a vpython "spec" file, meant specifically for the chromeperf +# dashboard GAE app and associated services defined in catapult/dashboard. 
+# +# Try to keep this in sync with packages/versions declared in the +# requirements.txt file in this same directory. +# +# Read more about `vpython` and how to modify this file here: +# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md + +python_version: "3.8" + +# Used by: +# vpython3 bin/run_py_test +# This is used in pre-submit try jobs, which used to rely on gae-sdk from cipd, +# and in post-submit cloud biulds, which used to rely on google/cloud-sdk +# docker image. Both sources are out of date and do not support python 3. +wheel: < + name: "infra/python/wheels/appengine-python-standard-py3" + version: "version:1.1.1" +> +wheel: < + name: "infra/python/wheels/pycparser-py2_py3" + version: "version:2.21" +> +wheel: < + name: "infra/python/wheels/certifi-py3" + version: "version:2023.5.7" +> +wheel: < + name: "infra/python/wheels/cffi/${vpython_platform}" + version: "version:1.15.1" +> +wheel: < + name: "infra/python/wheels/google-cloud-storage-py3" + version: "version:2.1.0" +> +wheel: < + name: "infra/python/wheels/google-resumable-media-py3" + version: "version:2.3.0" +> +wheel: < + name: "infra/python/wheels/google-crc32c/${vpython_platform}" + version: "version:1.3.0" +> +wheel: < + name: "infra/python/wheels/protobuf-py3" + version: "version:3.19.4" +> +wheel: < + name: "infra/python/wheels/mock-py3" + version: "version:4.0.3" +> +wheel: < + name: "infra/python/wheels/pillow/${vpython_platform}" + version: "version:8.3.1" +> +wheel: < + name: "infra/python/wheels/ruamel_yaml-py3" + version: "version:0.17.16" +> +wheel: < + name: "infra/python/wheels/requests-py3" + version: "version:2.31.0" +> +wheel: < + name: "infra/python/wheels/frozendict-py3" + version: "version:2.0.6" +> +wheel: < + name: "infra/python/wheels/pytz-py2_py3" + version: "version:2021.1" +> +wheel: < + name: "infra/python/wheels/urllib3-py2_py3" + version: "version:1.26.6" +> +wheel: < + name: "infra/python/wheels/google-auth-py2_py3" + version: "version:1.35.0" +> +# TODO(https://crbug.com/898348): Add in necessary wheels as Python3 versions +# become available. 
+wheel: < + name: "infra/python/wheels/six-py2_py3" + version: "version:1.15.0" +> +wheel < + name: "infra/python/wheels/attrs-py2_py3" + version: "version:21.4.0" +> +wheel: < + name: "infra/python/wheels/pyasn1_modules-py2_py3" + version: "version:0.2.8" +> +wheel: < + name: "infra/python/wheels/rsa-py3" + version: "version:4.7.2" +> +wheel: < + name: "infra/python/wheels/cachetools-py3" + version: "version:4.2.2" +> +wheel: < + name: "infra/python/wheels/google-cloud-core-py3" + version: "version:2.2.2" +> +wheel: < + name: "infra/python/wheels/google-api-core-py3" + version: "version:1.31.5" +> +wheel: < + name: "infra/python/wheels/google-api-python-client-py3" + version: "version:2.2.0" +> +wheel: < + name: "infra/python/wheels/packaging-py2_py3" + version: "version:16.8" +> +wheel: < + name: "infra/python/wheels/googleapis-common-protos-py2_py3" + version: "version:1.52.0" +> +wheel: < + name: "infra/python/wheels/pyparsing-py2_py3" + version: "version:2.4.7" +> +wheel: < + name: "infra/python/wheels/pyasn1-py2_py3" + version: "version:0.4.8" +> +wheel: < + name: "infra/python/wheels/charset_normalizer-py3" + version: "version:2.0.4" +> +wheel: < + name: "infra/python/wheels/idna-py2_py3" + version: "version:2.8" +> +wheel: < + name: "infra/python/wheels/ruamel_yaml_clib/${vpython_platform}" + version: "version:0.2.6" +> +wheel: < + name: "infra/python/wheels/pyyaml-py3" + version: "version:5.3.1" +> +wheel: < + name: "infra/python/wheels/typing-extensions-py3" + version: "version:3.7.4.3" +> +wheel: < + name: "infra/python/wheels/typing-inspect-py3" + version: "version:0.7.1" +> +wheel: < + name: "infra/python/wheels/mypy-extensions-py3" + version: "version:0.4.3" +> +# Used by code coverage reporting tools in: +# //third_party/catapult/third_party/coverage/coverage/ +wheel: < + name: "infra/python/wheels/coverage/${vpython_platform}" + version: "version:5.5.chromium.3" +> +wheel: < + name: "infra/python/wheels/flask-py2_py3" + version: "version:1.0.2" +> +wheel: < + name: "infra/python/wheels/itsdangerous-py2_py3" + version: "version:1.1.0" +> +wheel: < + name: "infra/python/wheels/click-py2_py3" + version: "version:7.0" +> +wheel: < + name: "infra/python/wheels/werkzeug-py2_py3" + version: "version:1.0.1" +> +wheel: < + name: "infra/python/wheels/jinja2-py2_py3" + version: "version:2.10.1" +> +wheel: < + name: "infra/python/wheels/markupsafe/${vpython_platform}" + version: "version:1.1.1" +> +wheel: < + name: "infra/python/wheels/flask-talisman-py2_py3" + version: "version:0.7.0" +> +wheel: < + name: "infra/python/wheels/webtest-py2_py3" + version: "version:2.0.35" +> +wheel: < + name: "infra/python/wheels/beautifulsoup4-py3" + version: "version:4.9.0" +> +wheel: < + name: "infra/python/wheels/soupsieve-py2_py3" + version: "version:1.9.5" +> +wheel: < + name: "infra/python/wheels/webob-py2_py3" + version: "version:1.8.6" +> +wheel: < + name: "infra/python/wheels/waitress-py2_py3" + version: "version:1.4.3" +> +wheel: < + name: "infra/python/wheels/google-cloud-datastore-py3" + version: "version:2.1.6" +> +wheel: < + name: "infra/python/wheels/libcst-py3" + version: "version:0.3.19" +> +wheel: < + name: "infra/python/wheels/proto-plus-py3" + version: "version:1.20.3" +> +wheel: < + name: "infra/python/wheels/grpcio/${vpython_platform}" + version: "version:1.44.0" +> +wheel: < + name: "infra/python/wheels/grpc-google-iam-v1-py3" + version: "version:0.12.3" +> +wheel: < + name: "infra/python/wheels/google-cloud-audit-log-py2_py3" + version: "version:0.2.0" +> +wheel: < + name: 
"infra/python/wheels/google-auth-httplib2-py2_py3" + version: "version:0.1.0" +> +wheel: < + name: "infra/python/wheels/httplib2-py3" + version: "version:0.19.1" +> +wheel: < + name: "infra/python/wheels/uritemplate-py2_py3" + version: "version:3.0.0" +> +wheel: < + name: "infra/python/wheels/python-dateutil-py2_py3" + version: "version:2.7.3" +> +wheel: < + name: "infra/python/wheels/ijson/${vpython_platform}" + version: "version:3.2.3" +> +wheel: < + name: "infra/python/wheels/google-cloud-monitoring-py2_py3" + version: "version:2.9.1" +> +wheel: < + name: "infra/python/wheels/google-cloud-logging-py3" + version: "version:3.0.0" +> +wheel: < + name: "infra/python/wheels/google-cloud-appengine-logging-py2_py3" + version: "version:1.1.1" +> diff --git a/dashboard/Makefile b/dashboard/Makefile new file mode 100644 index 00000000000..236f17a2c1b --- /dev/null +++ b/dashboard/Makefile @@ -0,0 +1,19 @@ +PROTOC=protoc + +PY_PROTOS=dims_pb2.py pinpoint_chrome_health_results_pb2.py pinpoint_results_pb2.py sheriff_pb2.py sheriff_config_pb2.py + + +all: $(PY_PROTOS) ../tracing/tracing/proto/histogram_pb2.py + + +# We now depend on the tracing proto being defined. +../tracing/tracing/proto/histogram_pb2.py: + $(MAKE) -C ../tracing/tracing/proto histogram_pb2.py + +.PHONY: clean ../tracing/tracing/proto/histogram_pb2.py + +%_pb2.py: dashboard/protobuf/%.proto + $(PROTOC) --python_out=. $< + +clean: + rm -f $(PY_PROTOS); $(MAKE) -C ../tracing/tracing/proto clean diff --git a/dashboard/OWNERS b/dashboard/OWNERS index b90ddbab4ab..cca7d92c8b9 100644 --- a/dashboard/OWNERS +++ b/dashboard/OWNERS @@ -1,8 +1,10 @@ -abennetts@google.com -dberris@chromium.org -fancl@chromium.org +set noparent - -# For WebRTC related changes: -mbonadei@chromium.org -jleconte@google.com \ No newline at end of file +ashwinpv@google.com +eduardoyap@google.com +haowoo@google.com +johnchen@chromium.org +seanmccullough@google.com +sunpeng@google.com +sunxiaodi@google.com +wenbinzhang@google.com diff --git a/dashboard/PRESUBMIT.py b/dashboard/PRESUBMIT.py index a2d32a0c150..badab44a216 100644 --- a/dashboard/PRESUBMIT.py +++ b/dashboard/PRESUBMIT.py @@ -1,11 +1,14 @@ # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +# pylint: disable=invalid-name from __future__ import print_function from __future__ import division from __future__ import absolute_import +USE_PYTHON3 = True + def CheckChangeOnUpload(input_api, output_api): return _CommonChecks(input_api, output_api) @@ -16,7 +19,11 @@ def CheckChangeOnCommit(input_api, output_api): def _CommonChecks(input_api, output_api): - files_to_skip = input_api.DEFAULT_FILES_TO_SKIP + ('.*_pb2.py$',) + files_to_skip = input_api.DEFAULT_FILES_TO_SKIP + ( + '.*_pb2.py$', + '.*_pb2_grpc.py$', + '.*bq_export.*', + ) results = [] results += input_api.RunTests( input_api.canned_checks.GetPylint( @@ -24,7 +31,8 @@ def _CommonChecks(input_api, output_api): output_api, extra_paths_list=_GetPathsToPrepend(input_api), files_to_skip=files_to_skip, - pylintrc='pylintrc')) + pylintrc='pylintrc', + version='2.7')) return results diff --git a/dashboard/README.md b/dashboard/README.md index af1d414bfaa..995838cbf0d 100644 --- a/dashboard/README.md +++ b/dashboard/README.md @@ -9,6 +9,13 @@ and monitoring performance test results. 
- [Project glossary](/dashboard/docs/glossary.md)
- [Pages and endpoints](/dashboard/docs/pages-and-endpoints.md)

+## Running a local development server
+
+Running a local instance (i.e., a dev server) of the Performance Dashboard is
+no longer supported due to a python2 dependency in the appengine toolchain.
+
+To manually test your python server changes, deploy them to chromeperf-stage.
+
## Code Structure

All dashboard code lives in the `dashboard/` subdirectory, with the endpoints
@@ -36,6 +43,75 @@ subprojects which are also hosted in that directory:
- `sheriff_config`: A standalone service for managing sheriff configurations
  hosted in git repositories, accessed through luci-config.

+## Dependencies
+
+The dashboard has a few dependencies. Before running dashboard unit tests,
+be sure to follow all instructions in this section.
+
+### Google Cloud SDK
+
+The dashboard requires Python modules from Google Cloud SDK to run.
+An easy way to install it is through cipd, using the following command.
+You only need to do this once.
+(You can replace `~/google-cloud-sdk` with another location if you prefer.)
+
+```
+echo infra/gae_sdk/python/all latest | cipd ensure -root ~/google-cloud-sdk -ensure-file -
+```
+
+Then run the following command to set `PYTHONPATH`. It is recommended to add
+this to your `.bashrc` or equivalent.
+
+```
+export PYTHONPATH=~/google-cloud-sdk
+```
+
+If you already have a non-empty `PYTHONPATH`, you can add the Cloud SDK location
+to it. However, the dashboard does not require any additional Python libraries,
+so it is recommended that your `PYTHONPATH` contain only the Cloud SDK while
+testing the dashboard.
+
+(Note: The official source for Google Cloud SDK is https://cloud.google.com/sdk,
+and you can install Python modules with
+`gcloud components install app-engine-python`.
+However, this method of installation has not been verified with the dashboard.)
+
+### Compile Protobuf Definitions
+
+The dashboard uses several protobuf (protocol buffer) definitions, which must be
+compiled into Python modules. First you need to install the protobuf compiler,
+and then use it to compile the protobuf definition files.
+
+To install the protobuf compiler, use the following command.
+You only need to do this once.
+(You can replace `~/protoc` with another location if you prefer.)
+
+```
+echo infra/tools/protoc/linux-amd64 protobuf_version:v3.6.1 | cipd ensure -root ~/protoc -ensure-file -
+```
+
+Afterwards, run the following commands to compile the protobuf definitions.
+You need to do this whenever any of the protobuf definition files have changed.
+Modify the first line below if your catapult directory is at a different
+location.
+
+```
+catapult=~/chromium/src/third_party/catapult
+~/protoc/protoc --proto_path $catapult/dashboard --python_out $catapult/dashboard $catapult/dashboard/dashboard/protobuf/sheriff.proto $catapult/dashboard/dashboard/protobuf/sheriff_config.proto
+cp $catapult/dashboard/dashboard/protobuf/sheriff_pb2.py $catapult/dashboard/dashboard/sheriff_config/
+cp $catapult/dashboard/dashboard/protobuf/sheriff_config_pb2.py $catapult/dashboard/dashboard/sheriff_config/
+~/protoc/protoc --proto_path $catapult/tracing/tracing/proto --python_out $catapult/tracing/tracing/proto $catapult/tracing/tracing/proto/histogram.proto
+```
+
+## Unit Tests
+
+First, follow the steps given in the Dependencies section above.
+Then, run dashboard unit tests with: + +``` +dashboard/bin/run_py_tests +``` + ## Contact Bugs can be reported on the Chromium issue tracker using the `Speed>Dashboard` diff --git a/dashboard/api.yaml b/dashboard/api.yaml index 0b402d5b288..182e447d6d8 100644 --- a/dashboard/api.yaml +++ b/dashboard/api.yaml @@ -1,12 +1,10 @@ service: api -runtime: python27 -api_version: 1 -instance_class: F2 -# Individual api requests usually consume well under F2's 256MB memory limit, -# and the spa can fetch several api requests in parallel, so threadsafe:true -# allows this service to handle multiple requests in parallel to manage latency. -threadsafe: true +runtime: python38 +entrypoint: gunicorn -b:$PORT dashboard.dispatcher:APP --worker-class gthread --threads 10 --timeout 60 +app_engine_apis: true +# 2048MB Memory, 2.4GHz CPU limit, support auto scaling. +instance_class: F4_1G automatic_scaling: # We're setting the max concurrent request to 20, to allow AppEngine to scale @@ -16,26 +14,12 @@ automatic_scaling: max_concurrent_requests: 20 max_instances: 150 max_pending_latency: automatic - min_instances: 10 + min_instances: 3 target_cpu_utilization: 0.8 -libraries: -- name: jinja2 - version: "2.6" -- name: pycrypto - version: "2.6" -- name: webapp2 - version: "2.5.2" -- name: ssl - version: latest - env_variables: GAE_USE_SOCKETS_HTTPLIB: 'true' -builtins: -- appstats: on -- deferred: on - inbound_services: - warmup diff --git a/dashboard/app.yaml b/dashboard/app.yaml index c157a0a5f79..32c05346ac1 100644 --- a/dashboard/app.yaml +++ b/dashboard/app.yaml @@ -1,10 +1,13 @@ # Python Application Configuration # https://developers.google.com/appengine/docs/python/config/appconfig -runtime: python27 -api_version: 1 -threadsafe: false -instance_class: F4 +runtime: python38 + +entrypoint: gunicorn -b:$PORT dashboard.dispatcher:APP --worker-class gthread --threads 10 --timeout 60 +app_engine_apis: true +# 2048MB Memory, 2.4GHz CPU limit, support auto scaling. +instance_class: F4_1G + automatic_scaling: max_concurrent_requests: 80 max_instances: 150 @@ -12,26 +15,9 @@ automatic_scaling: min_instances: 1 target_cpu_utilization: 0.8 -libraries: -- name: jinja2 - version: "2.6" -- name: pycrypto - version: "2.6" -- name: webapp2 - version: "2.5.2" -- name: ssl - version: latest -- name: protobuf - version: "3.0.0" - env_variables: GAE_USE_SOCKETS_HTTPLIB: 'true' -builtins: -- remote_api: on -- appstats: on -- deferred: on - inbound_services: - warmup @@ -44,7 +30,6 @@ handlers: - url: /dashboard/static/ static_dir: dashboard/static/ secure: always - application_readable: true - url: /dashboard/elements/(.*\.html)$ static_files: dashboard/elements/\1 @@ -76,11 +61,6 @@ handlers: upload: gl-matrix-min.js secure: always -- url: /tsmon-client.js - static_files: tsmon-client.js - upload: tsmon-client.js - secure: always - # We need admin so only cron can trigger it. - url: /alert_groups_update script: dashboard.dispatcher.APP @@ -91,6 +71,4 @@ handlers: script: dashboard.dispatcher.APP secure: always -includes: -- scripts.yaml -- gae_ts_mon/gae_ts_mon # handles /internal/cron/ts_mon/send +# Need to check how we want to add the scripts.yaml diff --git a/dashboard/appengine_config.py b/dashboard/appengine_config.py deleted file mode 100644 index 3862b1895ba..00000000000 --- a/dashboard/appengine_config.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
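Stepping back to the api.yaml/app.yaml changes above: the new `entrypoint` has gunicorn import `dashboard.dispatcher` and serve its module-level `APP` object. As a minimal sketch of the shape that entrypoint expects (the real dispatcher registers many more routes; everything here other than the `APP` name is illustrative):

```python
# Minimal sketch of a module gunicorn could serve as `dashboard.dispatcher:APP`.
# The real dispatcher wires up all dashboard handlers; this only shows the shape.
from flask import Flask, make_response

APP = Flask(__name__)

@APP.route('/_ah/warmup')
def warmup():
  # app.yaml keeps `inbound_services: - warmup`, so App Engine may call this.
  return make_response('', 200)

# Served the same way the yaml entrypoint does, e.g.:
#   gunicorn -b :8080 dashboard.dispatcher:APP --worker-class gthread --threads 10
```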
-"""App Engine config. - -This module is loaded before others and can be used to set up the -App Engine environment. See: - https://cloud.google.com/appengine/docs/python/tools/appengineconfig -""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import os - -from google.appengine.ext import vendor - -import dashboard - -# The names used below are special constant names which other code depends on. -# pylint: disable=invalid-name - -appstats_SHELL_OK = True -appstats_CALC_RPC_COSTS = True - -# Allows remote_api from the peng team to support the crosbolt dashboard. -remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = ('LOAS_PEER_USERNAME', - ['chromeos-peng-performance']) - - -def webapp_add_wsgi_middleware(app): - from google.appengine.ext.appstats import recording - app = recording.appstats_wsgi_middleware(app) - return app - - -# pylint: enable=invalid-name - - -def _AddThirdPartyLibraries(): - """Registers the third party libraries with App Engine. - - In order for third-party libraries to be available in the App Engine - runtime environment, they must be added with vendor.add. The directories - added this way must be inside the App Engine project directory. - """ - # The deploy script is expected to add links to third party libraries - # before deploying. If the directories aren't there (e.g. when running tests) - # then just ignore it. - for library_dir in dashboard.THIRD_PARTY_LIBRARIES: - if os.path.exists(library_dir): - vendor.add(os.path.join(os.path.dirname(__file__), library_dir)) - - -_AddThirdPartyLibraries() - -# This is at the bottom because datastore_hooks may depend on third_party -# modules. -from dashboard.common import datastore_hooks -datastore_hooks.InstallHooks() diff --git a/dashboard/bin/deploy b/dashboard/bin/deploy index a427c550be0..e243a92f6e2 100755 --- a/dashboard/bin/deploy +++ b/dashboard/bin/deploy @@ -1,16 +1,13 @@ -#!/usr/bin/python -# Copyright 2015 The Chromium Authors. All rights reserved. +#!/usr/bin/python3 +# Copyright 2022 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import logging import os -import shutil import subprocess import sys -import tempfile -import time def _AddToPathIfNeeded(path): @@ -55,7 +52,7 @@ def Main(args, extra_args): os.mkdir(viewer_dir_path) except OSError: pass - with open(viewer_html_path, 'w') as f: + with open(viewer_html_path, 'wb') as f: from tracing_build import vulcanize_histograms_viewer s = vulcanize_histograms_viewer.VulcanizeHistogramsViewer() f.write(s.encode('utf-8')) diff --git a/dashboard/bin/dev_server b/dashboard/bin/dev_server deleted file mode 100755 index 0b2523c03f7..00000000000 --- a/dashboard/bin/dev_server +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/python -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import os -import sys -import logging - - -def _AddToPathIfNeeded(path): - if path not in sys.path: - sys.path.insert(0, path) - - -def Main(): - catapult_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..')) - - _AddToPathIfNeeded(os.path.join(catapult_path, 'dashboard')) - import dashboard - paths = dashboard.PathsForDeployment() - - _AddToPathIfNeeded(catapult_path) - from catapult_build import appengine_dev_server, temp_deployment_dir - - # Build the necessary files in the directory. - from dashboard_build import preprocess - with temp_deployment_dir.TempDeploymentDir(paths) as tmp_dir: - preprocess.PackPinpoint(catapult_path, tmp_dir, paths) - - # The bundled dir is already copied, so don't have the deployment logic - # copy it again. - paths.remove(os.path.join(tmp_dir, 'bundled')) - appengine_dev_server.DevAppserver(paths, sys.argv[1:], reuse_path=tmp_dir) - - -if __name__ == '__main__': - logging.basicConfig( - stream=sys.stdout, - level=logging.INFO, - format='[%(asctime)s - %(levelname)s]: \t%(message)s') - Main() diff --git a/dashboard/bin/run_py_tests b/dashboard/bin/run_py_tests index ce153a77304..9733641f23e 100755 --- a/dashboard/bin/run_py_tests +++ b/dashboard/bin/run_py_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env vpython +#!/usr/bin/env vpython3 # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -27,9 +27,14 @@ if __name__ == '__main__': else: install.InstallHooks() + # PYTHONPATH points to a out-of-date cipd package. + if 'PYTHONPATH' in os.environ: + os.environ.pop('PYTHONPATH') + from catapult_build import run_with_typ import dashboard + root = 'dashboard' return_code = run_with_typ.Run( - os.path.join(_DASHBOARD_PATH, 'dashboard'), - path=dashboard.PathsForTesting()) + os.path.join(_DASHBOARD_PATH, root), + path=dashboard.PathsForTesting()) sys.exit(return_code) diff --git a/dashboard/bq_export/README.md b/dashboard/bq_export/README.md index 93102be1b84..4778d5e680e 100644 --- a/dashboard/bq_export/README.md +++ b/dashboard/bq_export/README.md @@ -89,6 +89,19 @@ $ PYTHONPATH=$PYTHONPATH:"$(pwd)/bq_export" python \ --temp_location=gs://chromeperf-dataflow-temp/export-testmetadata-daily ``` +``` +$ PYTHONPATH=$PYTHONPATH:"$(pwd)/bq_export" python \ + bq_export/delete_upload_tokens.py \ + --service_account_email=bigquery-exporter@chromeperf.iam.gserviceaccount.com \ + --runner=DataflowRunner \ + --region=us-central1 \ + --experiments=use_beam_bq_sink \ + --setup_file=bq_export/setup.py \ + --staging_location=gs://chromeperf-dataflow/staging \ + --template_location=gs://chromeperf-dataflow/templates/delete_upload_tokens \ + --temp_location=gs://chromeperf-dataflow-temp/delete-upload-tokens-tmp +``` + There are Cloud Scheduler jobs configured to run `gs://chromeperf-dataflow/templates/export_anomalies`, `gs://chromeperf-dataflow/templates/export_rows`, and @@ -108,22 +121,21 @@ page to manually re-run daily job. You can execute one-off jobs with the `gcloud` tool. 
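One-off runs of these stored templates can also be launched programmatically. The sketch below uses the Dataflow `templates.launch` API via google-api-python-client (already pinned in .vpython3); the project, bucket paths, and parameters mirror the gcloud commands in this README, while the job name is arbitrary. The `gcloud` invocations remain the primary, documented route.

```python
# Hedged sketch: launch a stored Dataflow template from Python instead of gcloud.
from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')  # uses Application Default Credentials
request = dataflow.projects().locations().templates().launch(
    projectId='chromeperf',
    location='us-central1',
    gcsPath='gs://chromeperf-dataflow/templates/export_anomalies',
    body={
        'jobName': 'export-anomalies-one-off',  # arbitrary job name
        'parameters': {'end_date': '20191231', 'num_days': '31'},
        'environment': {
            'serviceAccountEmail':
                'bigquery-exporter@chromeperf.iam.gserviceaccount.com',
            'subnetwork': 'regions/us-central1/subnetworks/dashboard-batch',
        },
    })
print(request.execute())
```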
For example:
```
-$ gcloud dataflow jobs run export-anomalies-example-job \
+$ gcloud dataflow jobs run export-anomalies-backfill \
  --service-account-email=bigquery-exporter@chromeperf.iam.gserviceaccount.com \
  --gcs-location=gs://chromeperf-dataflow/templates/export_anomalies \
  --disable-public-ips \
  --max-workers=10 \
  --region=us-central1 \
  --staging-location=gs://chromeperf-dataflow-temp/export_anomalies \
-  --subnetwork=regions/us-central1/subnetworks/dashboard-batch \
-  --parameters=experiments=shuffle_mode=service
+  --subnetwork=regions/us-central1/subnetworks/dashboard-batch
```
To execute a manual backfill, specify the `end_date` and/or `num_days`
parameters. For example, this will regenerate the anomalies for December 2019:
```
-$ gcloud dataflow jobs run export-anomalies-backfill-example \
+$ gcloud dataflow jobs run export-anomalies-backfill \
  --service-account-email=bigquery-exporter@chromeperf.iam.gserviceaccount.com \
  --gcs-location=gs://chromeperf-dataflow/templates/export_anomalies \
  --disable-public-ips \
@@ -131,9 +143,24 @@ $ gcloud dataflow jobs run export-anomalies-backfill-example \
  --region=us-central1 \
  --staging-location=gs://chromeperf-dataflow-temp/export_anomalies \
  --subnetwork=regions/us-central1/subnetworks/dashboard-batch \
-  --parameters=experiments=shuffle_mode=service,end_date=20191231,num_days=31
+  --parameters=end_date=20191231,num_days=31
```
+Example for row backfill:
+```
+$ gcloud dataflow jobs run export-rows-backfill \
+  --service-account-email=bigquery-exporter@chromeperf.iam.gserviceaccount.com \
+  --gcs-location=gs://chromeperf-dataflow/templates/export_rows \
+  --disable-public-ips \
+  --max-workers=70 \
+  --region=us-central1 \
+  --staging-location=gs://chromeperf-dataflow-temp/export-rows-daily \
+  --subnetwork=regions/us-central1/subnetworks/dashboard-batch \
+  --parameters=end_date=20230710,num_days=1 \
+  --worker-machine-type=e2-standard-4
+```
+
+Due to the amount of data this job handles, it requires a more powerful worker
+machine and more concurrent workers.
+
**Tips:**
* When testing changes to the pipelines add `table_suffix=_test` to the
@@ -166,5 +193,5 @@ $ PYTHONPATH=$PYTHONPATH:"$(pwd)/bq_export" python bq_export/export_rows.py \
  --setup_file=bq_export/setup.py \
  --no_use_public_ips \
  --subnetwork=regions/us-central1/subnetworks/dashboard-batch \
-  --dataset=chromeperf_dashboard_rows
+  --dataset=chromeperf_dashboard_data
```
diff --git a/dashboard/bq_export/bq_export/bq_calc_stats.py b/dashboard/bq_export/bq_export/bq_calc_stats.py
index c78678f3dff..a909ae0b07f 100644
--- a/dashboard/bq_export/bq_export/bq_calc_stats.py
+++ b/dashboard/bq_export/bq_export/bq_calc_stats.py
@@ -288,7 +288,7 @@ def main():
          'bot_group', 'bot', 'measurement'))
  # Emit results to stats_by_* tables in BigQuery.
- """ + _ = """ CREATE TABLE `chromeperf.chromeperf_dashboard_data.stats_by_measurement_7d` (`date` DATE NOT NULL, @@ -336,7 +336,7 @@ def main(): ) PARTITION BY `date` CLUSTER BY bot_group, bot, measurement; - """ # pylint: disable=pointless-string-statement + """ bq_stats_by_measurement_schema = { 'fields': [ {'name': 'date', 'type': 'DATE', 'mode': 'REQUIRED'}, diff --git a/dashboard/bq_export/bq_export/bq_dash.py b/dashboard/bq_export/bq_export/bq_dash.py index e9b4f6b1983..8832240d7da 100644 --- a/dashboard/bq_export/bq_export/bq_dash.py +++ b/dashboard/bq_export/bq_export/bq_dash.py @@ -100,7 +100,7 @@ def AnomalyEntityToRowDict(entity): entities | 'ConvertEntityToRow(Anomaly)' >> beam.FlatMap(AnomalyEntityToRowDict)) - """ + _ = """ CREATE TABLE `chromeperf.chromeperf_dashboard_data.anomalies` (id INT64 NOT NULL, `timestamp` TIMESTAMP NOT NULL, @@ -129,7 +129,7 @@ def AnomalyEntityToRowDict(entity): earliest_input_timestamp TIMESTAMP, latest_input_timestamp TIMESTAMP) PARTITION BY DATE(`timestamp`); - """ # pylint: disable=pointless-string-statement + """ bq_anomaly_schema = { 'fields': [ { diff --git a/dashboard/bq_export/bq_export/bq_jobs.py b/dashboard/bq_export/bq_export/bq_jobs.py index 832079fd1e3..8ba569df05b 100644 --- a/dashboard/bq_export/bq_export/bq_jobs.py +++ b/dashboard/bq_export/bq_export/bq_jobs.py @@ -9,6 +9,7 @@ import json import logging +import six import apache_beam as beam from apache_beam.options.pipeline_options import DebugOptions @@ -104,7 +105,7 @@ def JobEntityToRowDict(entity): _IfNone(entity.get('use_execution_engine'), False), } except KeyError as e: - raise UnconvertibleJobError('Missing property: ' + str(e)) + six.raise_from(UnconvertibleJobError('Missing property: ' + str(e)), e) # Computed properties, directly translated from the ComputedProperty # definitions of the ndb.Model. d['completed'] = bool( @@ -153,7 +154,7 @@ def ConvertEntity(entity): job_dicts = ( job_entities | 'ConvertEntityToRow(Job)' >> beam.FlatMap(ConvertEntity)) - """ + _ = """ CREATE TABLE `chromeperf.chromeperf_dashboard_data.jobs` (id INT64 NOT NULL, arguments STRING NOT NULL, @@ -185,7 +186,7 @@ def ConvertEntity(entity): configuration STRING, batch_id STRING) PARTITION BY DATE(`create_time`); - """ # pylint: disable=pointless-string-statement + """ bq_job_schema = { 'fields': [ { diff --git a/dashboard/bq_export/bq_export/bq_rows.py b/dashboard/bq_export/bq_export/bq_rows.py index b33e807b4e2..200da9fca81 100644 --- a/dashboard/bq_export/bq_export/bq_rows.py +++ b/dashboard/bq_export/bq_export/bq_rows.py @@ -41,7 +41,7 @@ def main(): 'main', 'multiple_histograms_for_row') orphaned_histogram = Metrics.counter('main', 'orphaned_histogram') - """ + _ = """ CREATE TABLE `chromeperf.chromeperf_dashboard_rows.` (revision INT64 NOT NULL, value FLOAT64 NOT NULL, @@ -55,7 +55,7 @@ def main(): sample_values ARRAY) PARTITION BY DATE(`timestamp`) CLUSTER BY master, bot, measurement; - """ # pylint: disable=pointless-string-statement + """ bq_row_schema = { 'fields': [ { @@ -127,6 +127,8 @@ def RowEntityToRowDict(entity): if key in d or key in ['parent_test', 'error']: # skip properties with dedicated columns. 
continue + if isinstance(value, datetime.date): + value = value.isoformat() if isinstance(value, float): value = FloatHack(value) properties[key] = value @@ -189,7 +191,7 @@ def HistogramEntityToDict(entity): count = len(sample_values) sample_values = [v for v in sample_values if v is not None] if len(sample_values) != count: - logging.getLogger().warn( + logging.getLogger().warning( 'Histogram data.sampleValues contains null: %r', entity.key) for v in sample_values: if not isinstance(v, (int, float)): @@ -238,7 +240,7 @@ def MergeRowAndSampleValues(element): orphaned_histogram.inc() logging.getLogger().error("No Row for Histogram(s) (%r)", group_key) return [] - elif len(rows) > 1: + if len(rows) > 1: row_conflicts.inc() logging.getLogger().error("Multiple rows (%d) for %r", len(rows), group_key) diff --git a/dashboard/bq_export/bq_export/bq_testmetadata.py b/dashboard/bq_export/bq_export/bq_testmetadata.py index 7fbddc28145..3e382e2e0eb 100644 --- a/dashboard/bq_export/bq_export/bq_testmetadata.py +++ b/dashboard/bq_export/bq_export/bq_testmetadata.py @@ -7,7 +7,7 @@ from __future__ import division from __future__ import print_function - +import six import apache_beam as beam from apache_beam.io.gcp.datastore.v1new.datastoreio import ReadFromDatastore from apache_beam.io.gcp.datastore.v1new.types import Query @@ -57,7 +57,7 @@ def TestMetadataEntityToRowDict(entity): 'unescaped_story_name': props.get('unescaped_story_name'), } except KeyError as e: - raise UnconvertibleEntityError('Missing property: ' + str(e)) + six.raise_from(UnconvertibleEntityError('Missing property: ' + str(e)), e) # Computed properties, directly translated from the ComputedProperty # definitions of the ndb.Model. parts = d['test'].split('/') @@ -103,7 +103,7 @@ def main(): failed_entity_transforms)) ) - """ + _ = """ CREATE TABLE `chromeperf.chromeperf_dashboard_data.test_metadata` (test STRING NOT NULL, internal_only BOOLEAN NOT NULL, @@ -119,7 +119,7 @@ def main(): measurement STRING NOT NULL, ) CLUSTER BY bot_group, bot, measurement; - """ # pylint: disable=pointless-string-statement + """ bq_testmetadata_schema = { 'fields': [ # 'test' corresponds to the same column in the Rows export. diff --git a/dashboard/bq_export/bq_export/export_options.py b/dashboard/bq_export/bq_export/export_options.py index 70124d327cb..010657e2cdb 100644 --- a/dashboard/bq_export/bq_export/export_options.py +++ b/dashboard/bq_export/bq_export/export_options.py @@ -46,7 +46,7 @@ def GetTimeRangeProvider(self): return _TimeRangeProvider(self.end_date, self.num_days) -class _TimeRangeProvider(object): +class _TimeRangeProvider: """A ValueProvider-like based on the end_date and num_days ValueProviders. This class is a workaround for the lack of NestedValueProviders in Beam's diff --git a/dashboard/bq_export/bq_export/split_by_timestamp.py b/dashboard/bq_export/bq_export/split_by_timestamp.py index 575c4dbaac7..d84886451a7 100644 --- a/dashboard/bq_export/bq_export/split_by_timestamp.py +++ b/dashboard/bq_export/bq_export/split_by_timestamp.py @@ -48,7 +48,7 @@ def __init__(self, :timestamp_property: a str of the name of the timestamp property to filter on. 
""" - super(ReadTimestampRangeFromDatastore, self).__init__() + super().__init__() self._query_params = query_params self._time_range_provider = time_range_provider self._step = step @@ -69,7 +69,7 @@ def expand(self, pcoll): # pylint: disable=invalid-name class _QueryFn(beam.DoFn): def __init__(self, query_params, timestamp_property): - super(ReadTimestampRangeFromDatastore._QueryFn, self).__init__() + super().__init__() self._query_params = query_params self._timestamp_property = timestamp_property diff --git a/dashboard/bq_export/delete_upload_tokens.py b/dashboard/bq_export/delete_upload_tokens.py new file mode 100644 index 00000000000..b4eca980c24 --- /dev/null +++ b/dashboard/bq_export/delete_upload_tokens.py @@ -0,0 +1,231 @@ +# Copyright 2020 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import apache_beam as beam +import logging + +from apache_beam import metrics +from apache_beam.io.gcp.datastore.v1new import datastoreio +from apache_beam.io.gcp.datastore.v1new.types import Query +from apache_beam.options.pipeline_options import GoogleCloudOptions +from apache_beam.options.pipeline_options import PipelineOptions + +DATETIME_FORMAT = '%Y-%m-%d:%H:%M:%S%z' + + +class TokenSelectionOptions(PipelineOptions): + + @classmethod + def _add_argparse_args(cls, parser): + parser.add_value_provider_argument( + '--max_lifetime', + help=('The duration of time an UploadToken should be kept in ' + 'the Datstore, expressed as a string in hours or ' + 'minutes or combinations (e.g. 1h30m)'), + default='6h', + ) + parser.add_value_provider_argument( + '--reference_time', + help=(f'A datetime-parseable reference time, in this ' + f'format: {DATETIME_FORMAT} -- if empty means ' + f'"now".'), + default='', + ) + + def get_selection_provider(self): + return _SelectionProvider(self.max_lifetime, self.reference_time) + + +class _SelectionProvider: + + def __init__(self, max_lifetime, reference_time): + self._max_lifetime = max_lifetime + self._reference_time = reference_time + self._cached_max_lifetime = None + self._cached_reference_time = None + + @property + def max_lifetime(self): + import datetime + import re + if self._cached_max_lifetime is None: + # Support hours and minutes and combinations of each. 
+      result = re.match(r'((?P<hours>\d+)[Hh])?((?P<minutes>\d+)[Mm])?',
+                        self._max_lifetime.get())
+      hours = result.group('hours') or '0'
+      minutes = result.group('minutes') or '0'
+      self._cached_max_lifetime = datetime.timedelta(
+          minutes=int(minutes), hours=int(hours))
+    return self._cached_max_lifetime
+
+  @property
+  def reference_time(self):
+    import datetime
+    if self._cached_reference_time is None:
+      ref_time = self._reference_time.get()
+      if not ref_time:
+        self._cached_reference_time = datetime.datetime.now(
+            tz=datetime.timezone.utc)
+      else:
+        self._cached_reference_time = datetime.datetime.strptime(
+            ref_time, DATETIME_FORMAT)
+    return self._cached_reference_time
+
+  def __str__(self):
+    return (f'_SelectionProvider(max_lifetime={self._max_lifetime},'
+            f'ref={self._reference_time})')
+
+
+def select_expired_tokens(
+    raw_tokens: beam.Pipeline,
+    raw_measurements: beam.Pipeline,
+    selection_provider: _SelectionProvider,
+):
+  missing_token_measurements_count = metrics.Metrics.counter(
+      'select', 'missing_token_measurements_count')
+  deleted_tokens = metrics.Metrics.counter('select', 'deleted_tokens')
+  deleted_measurements = metrics.Metrics.counter('select',
+                                                 'deleted_measurements')
+
+  def extract_update_timestamp(token):
+    return (token.key.to_client_key().id, token)
+
+  tokens = (
+      raw_tokens
+      | 'ExtractTokenKey' >> beam.Map(extract_update_timestamp))
+
+  def extract_associated_token(measurement_entity, missing_counter):
+    measurement = measurement_entity.to_client_entity()
+    token_key = measurement.get('token')
+    if not token_key:
+      missing_counter.inc()
+      token_key = '(unspecified)'
+    else:
+      token_key = token_key.id
+    return (token_key, measurement_entity.key)
+
+  measurements = (
+      raw_measurements
+      | 'ExtractAssociatedToken' >> beam.Map(extract_associated_token,
+                                             missing_token_measurements_count))
+
+  # We'll collect all `Measurement` keys by the 'Token' key.
+  measurements_by_token = (({
+      'token': tokens,
+      'measurements': measurements,
+  })
+                           | 'Merge' >> beam.CoGroupByKey())
+
+  expired_tokens_counter = metrics.Metrics.counter('select', 'expired_tokens')
+
+  def expired_token_selector(keyed_token_and_measurements, selection_provider):
+    _, token_and_measurements = keyed_token_and_measurements
+    tokens = token_and_measurements['token']
+
+    # This means we have already deleted the token for these
+    # measurements, so we'll always delete these measurements.
+    if not tokens:
+      expired_tokens_counter.inc()
+      return True
+    token = token_and_measurements['token'][0].to_client_entity()
+    lifetime = (selection_provider.reference_time - token['update_time'])
+    if lifetime >= selection_provider.max_lifetime:
+      expired_tokens_counter.inc()
+      return True
+    return False
+
+  # We return two PCollection instances, one with just the expired tokens and
+  # the other the expired measurements.
+ expired_tokens = ( + measurements_by_token + | 'SelectExpiredTokens' >> beam.Filter(expired_token_selector, + selection_provider)) + + def extract_token_key(keyed_token_and_measurements, counter): + _, token_and_measurements = keyed_token_and_measurements + tokens = token_and_measurements['token'] + res = [t.key for t in tokens] + counter.inc(len(res)) + return res + + def pick_nonempty_tokens(keyed_token_and_measurements): + token_key, _ = keyed_token_and_measurements + return token_key != '(unspecified)' or len(token_key) > 0 + + tokens_to_delete = ( + expired_tokens + | 'PickNonEmptyTokens' >> beam.Filter(pick_nonempty_tokens) + | 'ExtractTokenKeys' >> beam.FlatMap(extract_token_key, deleted_tokens)) + + def extract_measurement(keyed_token_and_measurements, counter): + _, token_and_measurements = keyed_token_and_measurements + res = token_and_measurements['measurements'] + counter.inc(len(res)) + return res + + measurements_to_delete = ( + expired_tokens + | 'ExtractMeasurementKeys' >> beam.FlatMap(extract_measurement, + deleted_measurements)) + + return tokens_to_delete, measurements_to_delete + + +class CountInput(beam.DoFn): + + def __init__(self, counter): + self._counter = counter + + def process(self, input): + self._counter.inc() + yield input + + +def main(): + project = 'chromeperf' + options = PipelineOptions() + options.view_as(GoogleCloudOptions).project = project + selection_options = options.view_as(TokenSelectionOptions) + + p = beam.Pipeline(options=options) + token_count = metrics.Metrics.counter('main', 'tokens_read') + measurement_count = metrics.Metrics.counter('main', 'measurements_read') + + raw_tokens = ( + p | 'ReadUploadTokens' >> + datastoreio.ReadFromDatastore(query=Query(kind='Token', project=project))) + + # Count the tokens. + _ = (raw_tokens | 'CountTokens' >> beam.ParDo(CountInput(token_count))) + + raw_measurements = ( + p + | 'ReadMeasurements' >> datastoreio.ReadFromDatastore( + query=Query(kind='Measurement', project=project))) + + # Count the measurements. + _ = ( + raw_measurements + | 'CountMeasurements' >> beam.ParDo(CountInput(measurement_count))) + + tokens_to_delete, measurements_to_delete = select_expired_tokens( + raw_tokens, raw_measurements, selection_options.get_selection_provider()) + + _ = ( + tokens_to_delete + | 'DeleteTokens' >> datastoreio.DeleteFromDatastore(project)) + + _ = ( + measurements_to_delete + | 'ReshuffleMeasurements' >> beam.Reshuffle() + | 'DeleteMeasurements' >> datastoreio.DeleteFromDatastore(project)) + + # Run the pipeline! + result = p.run() + result.wait_until_finish() + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + main() diff --git a/dashboard/cloudbuild-dashboard-push-on-green-py3.yaml b/dashboard/cloudbuild-dashboard-push-on-green-py3.yaml new file mode 100644 index 00000000000..df3cec96a60 --- /dev/null +++ b/dashboard/cloudbuild-dashboard-push-on-green-py3.yaml @@ -0,0 +1,61 @@ +# These are the testing and deploy steps for the performance dashboard +# services. We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 1800s # Wait for 30 minutes for the whole process to finish. 
+options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +- name: 'gcr.io/cloud-builders/docker' + entrypoint: 'bash' + args: + - '-c' + - | + docker pull gcr.io/$PROJECT_ID/dashboard-base:latest || exit 0 +- name: 'gcr.io/cloud-builders/docker' + dir: 'dashboard/dev_dockerfiles' + args: [ + 'build', + '-t', 'dashboard-base:latest', + '-t', 'gcr.io/$PROJECT_ID/dashboard-base:latest', + '--cache-from', 'gcr.io/$PROJECT_ID/dashboard-base:latest', + '.' + ] +- name: 'gcr.io/$PROJECT_ID/docker-compose' + dir: 'dashboard/dev_dockerfiles' + args: [ + 'run', 'python-unittest-dashboard' + ] +# We need to provide the auth token that the service account is using to the +# container from which we're going to deploy the Dashboard services. +- name: 'gcr.io/$PROJECT_ID/docker-compose' + dir: 'dashboard/dev_dockerfiles' + args: [ + 'run', 'cloudbuild-prepare-deployment' + ] +- name: 'gcr.io/cloud-builders/gcloud' + dir: 'deploy-dashboard' + args: [ + app, deploy, '--no-promote', '--version', 'cloud-build-${SHORT_SHA}', + # We enumerate the files we need to deploy just for the dashboard. + # TODO(dberris): Figure out how we can include cron.yaml and dispatch.yaml + # from this automation. This fails in production with the service account + # used by cloud-build, so we've left it out for now. + api.yaml, + app.yaml, + upload-processing.yaml, + upload.yaml, + pinpoint.yaml, + ] +- name: 'gcr.io/cloud-builders/gcloud' + dir: 'deploy-dashboard' + args: [ + 'app', 'deploy', 'queue.yaml', + ] +- name: 'gcr.io/cloud-builders/gcloud' + entrypoint: '/bin/bash' + args: [ + '-x', '-e', 'dashboard/dev_dockerfiles/cleanup_versions.sh', + '21', 'api', 'default', 'pinpoint', 'upload-processing', 'upload' + ] +images: ['gcr.io/$PROJECT_ID/dashboard-base:latest'] diff --git a/dashboard/cloudbuild-dashboard-push-on-green.yaml b/dashboard/cloudbuild-dashboard-push-on-green.yaml deleted file mode 100644 index 2adf1b6d01b..00000000000 --- a/dashboard/cloudbuild-dashboard-push-on-green.yaml +++ /dev/null @@ -1,57 +0,0 @@ -# These are the testing and deploy steps for the performance dashboard -# services. We re-use the docker-compose files in the dev_dockerfiles directory -# to ensure we're runing the same test and deploy cycle everytime. -timeout: 1800s # Wait for 30 minutes for the whole process to finish. -options: - diskSizeGb: 100 - machineType: 'N1_HIGHCPU_8' -steps: -- name: 'gcr.io/cloud-builders/docker' - entrypoint: 'bash' - args: - - '-c' - - | - docker pull gcr.io/$PROJECT_ID/dashboard-base:latest || exit 0 -- name: 'gcr.io/cloud-builders/docker' - dir: 'dashboard/dev_dockerfiles' - args: [ - 'build', - '-t', 'dashboard-base:latest', - '-t', 'gcr.io/$PROJECT_ID/dashboard-base:latest', - '--cache-from', 'gcr.io/$PROJECT_ID/dashboard-base:latest', - '.' - ] -- name: 'gcr.io/$PROJECT_ID/docker-compose' - dir: 'dashboard/dev_dockerfiles' - args: [ - 'run', 'python-unittest-dashboard' - ] -# We need to provide the auth token that the service account is using to the -# container from which we're going to deploy the Dashboard services. 
-- name: 'gcr.io/$PROJECT_ID/docker-compose' - dir: 'dashboard/dev_dockerfiles' - args: [ - 'run', 'cloudbuild-prepare-deployment' - ] -- name: 'gcr.io/cloud-builders/gcloud' - entrypoint: '/bin/bash' - args: [ - '-x', '-e', 'dashboard/dev_dockerfiles/cleanup_versions.sh', - 'api', 'default', 'pinpoint', 'upload-processing', 'upload' - ] -- name: 'gcr.io/cloud-builders/gcloud' - dir: 'deploy-dashboard' - args: [ - app, deploy, '--no-promote', '--version', 'cloud-build-${SHORT_SHA}', - # We enumerate the files we need to deploy just for the dashboard. - # TODO(dberris): Figure out how we can include cron.yaml and dispatch.yaml - # from this automation. This fails in production with the service account - # used by cloud-build, so we've left it out for now. - api.yaml, - app.yaml, - upload-processing.yaml, - upload.yaml, - # Deploy Pinpoint as well. - pinpoint.yaml, - ] -images: ['gcr.io/$PROJECT_ID/dashboard-base:latest'] \ No newline at end of file diff --git a/dashboard/cloudbuild-sheriff-config-testing.yaml b/dashboard/cloudbuild-sheriff-config-testing.yaml index 8e78af9449f..5dd50284604 100644 --- a/dashboard/cloudbuild-sheriff-config-testing.yaml +++ b/dashboard/cloudbuild-sheriff-config-testing.yaml @@ -5,10 +5,10 @@ timeout: 1800s # Wait for 30 minutes for the whole build to finish. steps: - name: 'gcr.io/chromeperf/protoc' args: [ - '-I', 'dashboard/dashboard/proto', + '-I', 'dashboard', '--python_out', 'dashboard/dashboard/sheriff_config', - 'dashboard/dashboard/proto/sheriff.proto', - 'dashboard/dashboard/proto/sheriff_config.proto' + 'dashboard/dashboard/protobuf/sheriff.proto', + 'dashboard/dashboard/protobuf/sheriff_config.proto' ] - name: 'docker/compose:1.19.0' args: [ @@ -38,8 +38,9 @@ steps: timeout: 900s # Wait for 15 minutes for cleanup - name: 'gcr.io/cloud-builders/gcloud' args: [ - 'beta', 'app', 'deploy', 'dashboard/dashboard/sheriff_config/app.yaml', - # Provide the short git commit to the version of the deployment. - '--version', 'cloud-build-${SHORT_SHA}', '--promote' + app, deploy, '--no-promote', '--version', 'cloud-build-${SHORT_SHA}', + 'dashboard/dashboard/sheriff_config/app.yaml', + # Debug flags. + '--log-http', '--verbosity=debug' ] timeout: 900s # Wait for 15 minutes for the deploy to finish. diff --git a/dashboard/cloudbuild_traffic/api.yaml b/dashboard/cloudbuild_traffic/api.yaml new file mode 100644 index 00000000000..1c84dfa5f05 --- /dev/null +++ b/dashboard/cloudbuild_traffic/api.yaml @@ -0,0 +1,19 @@ +# These are the testing and deploy steps for the performance dashboard +# services. We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# We check in the target versions to avoid unintended traffic changes. +- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for api service' + dir: 'deploy-dashboard' + args: [ + 'app', 'services', 'set-traffic', 'api', + '--splits=${_SERVICE_VERSION}=1' + ] + +substitutions: + _SERVICE_VERSION: 'cloud-build-bd311c3' diff --git a/dashboard/cloudbuild_traffic/default.yaml b/dashboard/cloudbuild_traffic/default.yaml new file mode 100644 index 00000000000..5a47ac2a512 --- /dev/null +++ b/dashboard/cloudbuild_traffic/default.yaml @@ -0,0 +1,18 @@ +# These are the testing and deploy steps for the performance dashboard +# services. 
We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# We check in the target versions to avoid unintended traffic changes. +- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for default service' + dir: 'deploy-dashboard' + args: [ + 'app', 'services', 'set-traffic', 'default', + '--splits=${_SERVICE_VERSION}=1' + ] +substitutions: + _SERVICE_VERSION: 'cloud-build-bd311c3' diff --git a/dashboard/cloudbuild_traffic/pinpoint.yaml b/dashboard/cloudbuild_traffic/pinpoint.yaml new file mode 100644 index 00000000000..4a1c1aa587f --- /dev/null +++ b/dashboard/cloudbuild_traffic/pinpoint.yaml @@ -0,0 +1,18 @@ +# These are the testing and deploy steps for the performance dashboard +# services. We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# We check in the target versions to avoid unintended traffic changes. +- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for pinpoint service' + dir: 'deploy-dashboard' + args: [ + 'app', 'services', 'set-traffic', 'pinpoint', + '--splits=${_SERVICE_VERSION}=1' + ] +substitutions: + _SERVICE_VERSION: 'cloud-build-bd311c3' diff --git a/dashboard/cloudbuild_traffic/sheriff-config.yaml b/dashboard/cloudbuild_traffic/sheriff-config.yaml new file mode 100644 index 00000000000..b2bcca222d1 --- /dev/null +++ b/dashboard/cloudbuild_traffic/sheriff-config.yaml @@ -0,0 +1,26 @@ +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# Start the version so that it is ready to receive traffic +- name: 'gcr.io/cloud-builders/gcloud' + args: [ + app, versions, 'start', '-s', 'sheriff-config', '${_SERVICE_VERSION}' + ] +# Set the traffic to the new version +- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for sheriff-config service' + args: [ + 'app', 'services', 'set-traffic', 'sheriff-config', + '--splits=${_SERVICE_VERSION}=1' + ] +# Stop older versions to release instances +- name: 'gcr.io/cloud-builders/gcloud' + entrypoint: '/bin/bash' + args: [ + '-x', '-e', 'dashboard/dev_dockerfiles/stop_old_versions.sh', + 'sheriff-config' + ] +substitutions: + _SERVICE_VERSION: 'cloud-build-4ca9d09' diff --git a/dashboard/cloudbuild_traffic/upload-processing.yaml b/dashboard/cloudbuild_traffic/upload-processing.yaml new file mode 100644 index 00000000000..4e9d8a3b8f5 --- /dev/null +++ b/dashboard/cloudbuild_traffic/upload-processing.yaml @@ -0,0 +1,18 @@ +# These are the testing and deploy steps for the performance dashboard +# services. We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# We check in the target versions to avoid unintended traffic changes. 
+- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for upload-processing service' + dir: 'deploy-dashboard' + args: [ + 'app', 'services', 'set-traffic', 'upload-processing', + '--splits=${_SERVICE_VERSION}=1' + ] +substitutions: + _SERVICE_VERSION: 'cloud-build-bd311c3' diff --git a/dashboard/cloudbuild_traffic/upload.yaml b/dashboard/cloudbuild_traffic/upload.yaml new file mode 100644 index 00000000000..cf51e0fcf6a --- /dev/null +++ b/dashboard/cloudbuild_traffic/upload.yaml @@ -0,0 +1,18 @@ +# These are the testing and deploy steps for the performance dashboard +# services. We re-use the docker-compose files in the dev_dockerfiles directory +# to ensure we're runing the same test and deploy cycle everytime. +timeout: 600s # Wait for 10 minutes for the whole process to finish. +options: + diskSizeGb: 100 + machineType: 'N1_HIGHCPU_8' +steps: +# We check in the target versions to avoid unintended traffic changes. +- name: 'gcr.io/cloud-builders/gcloud' + id: 'Set traffic for upload service' + dir: 'deploy-dashboard' + args: [ + 'app', 'services', 'set-traffic', 'upload', + '--splits=${_SERVICE_VERSION}=1' + ] +substitutions: + _SERVICE_VERSION: 'cloud-build-bd311c3' diff --git a/dashboard/cron-stage.yaml b/dashboard/cron-stage.yaml new file mode 100644 index 00000000000..6d0027899c0 --- /dev/null +++ b/dashboard/cron-stage.yaml @@ -0,0 +1,67 @@ +# Scheduled Tasks with Cron +# https://developers.google.com/appengine/docs/python/config/cron + +cron: +- description: Mark alerts as recovered. + url: /mark_recovered_alerts + schedule: every 3 hours + +- description: Update dashboard stats. + url: /update_dashboard_stats + schedule: every 24 hours + +- description: Delete expired layered_cache.CachedPickledString entities. + url: /delete_expired_entities + schedule: every 24 hours + +- description: Restart pinpoint jobs that encounter failures + url: /cron/refresh-jobs + schedule: every 15 minutes + target: pinpoint + +- description: Purge Pinpoint's expired isolates from datastore. + url: /cron/isolate-cleanup + schedule: every 24 hours + target: pinpoint + +- description: Update sandwich verification results for Pinpoint's bisection culprits. + url: /cron/update-culprit-verification-results + schedule: every 10 minutes + target: pinpoint + +- description: Poll luci-config for sheriff configurations. + url: /configs/update + schedule: every 1 minutes + +- description: Poll the Pinpoint FIFO scheduler. + url: /cron/fifo-scheduler + schedule: every 1 minutes + target: pinpoint + +# The backup cron job is disabled temporarily in order to check whether +# it is responsible for increased cost on the weekends. +# See: https://github.com/catapult-project/catapult/issues/1944 +# +# Scheduled backup. +# If you add new datastore kinds and want them to be backed up, +# you must add kind=argument to the URL below. Backups are available at: +# https://console.developers.google.com/datastore/settings?project=chromeperf +# See: https://cloud.google.com/appengine/articles/scheduled_backups +#- description: Back up all entities in the datastore. 
+# url: "/_ah/datastore_admin/backup.create?name=ScheduledBackup\ +#&kind=Master\ +#&kind=Bot\ +#&kind=Test\ +#&kind=Row\ +#&kind=Sheriff\ +#&kind=AnomalyConfig\ +#&kind=Anomaly\ +#&kind=IpWhitelist\ +#&kind=BotWhitelist\ +#&kind=BugLabelPatterns\ +#&kind=MultipartEntity\ +#&kind=PartEntity\ +#&filesystem=gs\ +#&gs_bucket_name=chromeperf.appspot.com" +# schedule: every saturday 05:00 +# target: ah-builtin-python-bundle diff --git a/dashboard/cron.yaml b/dashboard/cron.yaml index a00a8bb10eb..5cb7bd632d2 100644 --- a/dashboard/cron.yaml +++ b/dashboard/cron.yaml @@ -40,7 +40,7 @@ cron: - description: Restart pinpoint jobs that encounter failures url: /cron/refresh-jobs - schedule: every 30 minutes + schedule: every 15 minutes target: pinpoint - description: Purge Pinpoint's expired isolates from datastore. @@ -48,6 +48,11 @@ cron: schedule: every 24 hours target: pinpoint +- description: Update sandwich verification results for Pinpoint's bisection culprits. + url: /cron/update-culprit-verification-results + schedule: every 10 minutes + target: pinpoint + - description: Poll luci-config for sheriff configurations. url: /configs/update schedule: every 1 minutes diff --git a/dashboard/dashboard/Makefile b/dashboard/dashboard/Makefile deleted file mode 100644 index 2e4d710355f..00000000000 --- a/dashboard/dashboard/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -PROTOC=protoc - -PY_PROTOS=results_pb2.py sheriff_pb2.py sheriff_config_pb2.py - - -all: $(PY_PROTOS) ../../tracing/tracing/proto/histogram_pb2.py - -# We now depend on the tracing proto being defined. -../../tracing/tracing/proto/histogram_pb2.py: - $(MAKE) -C ../../tracing/tracing/proto histogram_pb2.py - -.PHONY: clean ../../tracing/tracing/proto/histogram_pb2.py - -%_pb2.py: proto/%.proto - $(PROTOC) -Iproto -I. --python_out=. $< - -clean: - rm -f $(PY_PROTOS); $(MAKE) -C ../../tracing/tracing/proto clean diff --git a/dashboard/dashboard/__init__.py b/dashboard/dashboard/__init__.py index b2e7851d562..379ece71ada 100644 --- a/dashboard/dashboard/__init__.py +++ b/dashboard/dashboard/__init__.py @@ -14,62 +14,29 @@ # Directories in catapult/third_party required by dashboard. THIRD_PARTY_LIBRARIES = [ - 'apiclient', - 'beautifulsoup4', - 'cachetools', - 'certifi', - 'chardet', 'cloudstorage', 'depot_tools', 'flot', 'gae_ts_mon', - 'google-auth', - 'graphy', - 'html5lib-python', - 'idna', - 'ijson', 'jquery', - 'mapreduce', - 'mock', - 'oauth2client', - 'pipeline', 'polymer', 'polymer-svg-template', 'polymer2/bower_components', 'polymer2/bower_components/chopsui', - 'pyasn1', - 'pyasn1_modules', - 'pyparsing', 'redux/redux.min.js', - 'requests', - 'requests_toolbelt', - 'rsa', - 'six', - 'uritemplate', - 'urllib3', - 'webapp2', - 'webtest', -] - -THIRD_PARTY_LIBRARIES_PY2 = THIRD_PARTY_LIBRARIES + [ - 'httplib2/python2/httplib2' -] - -THIRD_PARTY_LIBRARIES_PY3 = THIRD_PARTY_LIBRARIES + [ - 'httplib2/python3/httplib2' ] # Files and directories in catapult/dashboard. 
DASHBOARD_FILES = [ 'api.yaml', 'app.yaml', - 'appengine_config.py', 'cron.yaml', 'dashboard', 'dispatch.yaml', 'index.yaml', 'pinpoint.yaml', 'queue.yaml', + 'requirements.txt', 'scripts.yaml', 'upload-processing.yaml', 'upload.yaml', @@ -138,19 +105,17 @@ def _AllSdkThirdPartyLibraryPaths(): appengine_path = os.path.join(sdk_bin_path, 'platform', 'google_appengine') paths.append(appengine_path) sys.path.insert(0, appengine_path) - break try: - import dev_appserver + # pylint: disable=import-outside-toplevel + import google.appengine # pylint: disable=unused-import except ImportError: # TODO: Put the Cloud SDK in the path with the binary dependency manager. # https://github.com/catapult-project/catapult/issues/2135 - print('This script requires the Google Cloud SDK to be in PATH.') - print('Install at https://cloud.google.com/sdk and then run') - print('`gcloud components install app-engine-python`') + print('This script requires the Google Cloud SDK to be in PYTHONPATH.') + print('See https://chromium.googlesource.com/catapult/' + '+/HEAD/dashboard/README.md') sys.exit(1) - - paths.extend(dev_appserver.EXTRA_PATHS) return paths @@ -159,12 +124,8 @@ def _CatapultThirdPartyLibraryPaths(): paths = [] paths.append( os.path.join(_CATAPULT_PATH, 'common', 'node_runner', 'node_runner', - 'node_modules', '@chopsui', 'tsmon-client', - 'tsmon-client.js')) - third_party_libraries = ( - THIRD_PARTY_LIBRARIES_PY3 if sys.version_info.major == 3 - else THIRD_PARTY_LIBRARIES_PY2) - for library in third_party_libraries: + 'node_modules', '@chopsui')) + for library in THIRD_PARTY_LIBRARIES: paths.append(os.path.join(_CATAPULT_PATH, 'third_party', library)) return paths diff --git a/dashboard/dashboard/add_histograms.py b/dashboard/dashboard/add_histograms.py index ee08a5e21d6..f85fba1c5b5 100644 --- a/dashboard/dashboard/add_histograms.py +++ b/dashboard/dashboard/add_histograms.py @@ -6,7 +6,6 @@ from __future__ import division from __future__ import absolute_import -import cloudstorage import decimal import ijson import json @@ -15,15 +14,20 @@ import sys import uuid import zlib +try: + import cloudstorage.cloudstorage as cloudstorage +except ImportError: + # This is a work around to fix the discrepency on file tree in tests. + import cloudstorage from google.appengine.api import taskqueue from google.appengine.ext import ndb from dashboard import sheriff_config_client +from dashboard.api import api_auth from dashboard.api import api_request_handler from dashboard.common import datastore_hooks from dashboard.common import histogram_helpers -from dashboard.common import request_handler from dashboard.common import timing from dashboard.common import utils from dashboard.models import graph_data @@ -33,6 +37,8 @@ from tracing.value.diagnostics import diagnostic from tracing.value.diagnostics import reserved_infos +from flask import make_response, request + TASK_QUEUE_NAME = 'histograms-queue' _RETRY_PARAMS = cloudstorage.RetryParams(backoff_factor=1.1) @@ -40,60 +46,115 @@ _ZLIB_BUFFER_SIZE = 4096 -def _CheckRequest(condition, msg): - if not condition: - raise api_request_handler.BadRequestError(msg) - - -class DecompressFileWrapper(object): - """A file-like object implementing inline decompression. - - This class wraps a file-like object and does chunk-based decoding of the data. - We only implement the read() function supporting fixed-chunk reading, capped - to a predefined constant buffer length. 
- - Example - - with open('filename', 'r') as input: - decompressor = DecompressFileWrapper(input) - while True: - chunk = decompressor.read(4096) - if len(chunk) == 0: - break - // handle the chunk with size <= 4096 - """ - - def __init__(self, source_file, buffer_size=_ZLIB_BUFFER_SIZE): - self.source_file = source_file - self.decompressor = zlib.decompressobj() - self.buffer_size = buffer_size - - def __enter__(self): - return self +def _CheckUser(): + if utils.IsDevAppserver(): + return + api_auth.Authorize() + if not utils.IsInternalUser(): + raise api_request_handler.ForbiddenError() - def read(self, size=None): # pylint: disable=invalid-name - if size is None or size < 0: - size = self.buffer_size - # We want to read chunks of data from the buffer, chunks at a time. - temporary_buffer = self.decompressor.unconsumed_tail - if len(temporary_buffer) < self.buffer_size / 2: - raw_buffer = self.source_file.read(size) - if raw_buffer != '': - temporary_buffer += raw_buffer +def AddHistogramsProcessPost(): + datastore_hooks.SetPrivilegedRequest() + token = None - if len(temporary_buffer) == 0: - return u'' + try: + params = json.loads(request.get_data()) + gcs_file_path = params['gcs_file_path'] - decompressed_data = self.decompressor.decompress(temporary_buffer, size) - return decompressed_data + token_id = params.get('upload_completion_token') + if token_id is not None: + token = upload_completion_token.Token.get_by_id(token_id) + upload_completion_token.Token.UpdateObjectState( + token, upload_completion_token.State.PROCESSING) - def close(self): # pylint: disable=invalid-name - self.decompressor.flush() + try: + logging.debug('Loading %s', gcs_file_path) + gcs_file = cloudstorage.open( + gcs_file_path, 'r', retry_params=_RETRY_PARAMS) + with DecompressFileWrapper(gcs_file) as decompressing_file: + histogram_dicts = _LoadHistogramList(decompressing_file) + + gcs_file.close() + + ProcessHistogramSet(histogram_dicts, token) + finally: + cloudstorage.delete(gcs_file_path, retry_params=_RETRY_PARAMS) + + upload_completion_token.Token.UpdateObjectState( + token, upload_completion_token.State.COMPLETED) + return make_response('{}') + + except Exception as e: # pylint: disable=broad-except + logging.error('Error processing histograms: %s', str(e), exc_info=1) + upload_completion_token.Token.UpdateObjectState( + token, upload_completion_token.State.FAILED, str(e)) + return make_response(json.dumps({'error': str(e)})) + + +def _GetCloudStorageBucket(): + if utils.IsStagingEnvironment(): + return 'chromeperf-staging-add-histograms-cache' + return 'add-histograms-cache' + + +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def AddHistogramsPost(): + if utils.IsDevAppserver(): + # Don't require developers to zip the body. + # In prod, the data will be written to cloud storage and processed on the + # taskqueue, so the caller will not see any errors. In dev_appserver, + # process the data immediately so the caller will see errors. + # Also always create upload completion token for such requests. 
+ token, token_info = _CreateUploadCompletionToken() + ProcessHistogramSet( + _LoadHistogramList(six.StringIO(request.get_data())), token) + token.UpdateState(upload_completion_token.State.COMPLETED) + return token_info - def __exit__(self, exception_type, exception_value, execution_traceback): - self.close() - return False + with timing.WallTimeLogger('decompress'): + try: + data_str = request.get_data() + # Try to decompress at most 100 bytes from the data, only to determine + # if we've been given compressed payload. + zlib.decompressobj().decompress(data_str, 100) + logging.info('Received compressed data.') + except zlib.error as e: + data_str = request.form['data'] + if not data_str: + six.raise_from( + api_request_handler.BadRequestError( + 'Missing or uncompressed data.'), e) + data_str = zlib.compress(six.ensure_binary(data_str)) + logging.info('Received uncompressed data.') + + if not data_str: + raise api_request_handler.BadRequestError('Missing "data" parameter') + + filename = uuid.uuid4() + params = {'gcs_file_path': '/%s/%s' % (_GetCloudStorageBucket(), filename)} + + gcs_file = cloudstorage.open( + params['gcs_file_path'], + 'w', + content_type='application/octet-stream', + retry_params=_RETRY_PARAMS) + gcs_file.write(data_str) + gcs_file.close() + + _, token_info = _CreateUploadCompletionToken(params['gcs_file_path']) + params['upload_completion_token'] = token_info['token'] + + retry_options = taskqueue.TaskRetryOptions( + task_retry_limit=_TASK_RETRY_LIMIT) + queue = taskqueue.Queue('default') + + queue.add( + taskqueue.Task( + url='/add_histograms/process', + payload=json.dumps(params), + retry_options=retry_options)) + return token_info def _LoadHistogramList(input_file): @@ -129,125 +190,24 @@ def NormalizeDecimals(obj): except ijson.JSONError as e: # Wrap exception in a ValueError - raise ValueError('Failed to parse JSON: %s' % (e)) + six.raise_from(ValueError('Failed to parse JSON: %s' % (e)), e) return objects -class AddHistogramsProcessHandler(request_handler.RequestHandler): - - def post(self): - datastore_hooks.SetPrivilegedRequest() - token = None - - try: - params = json.loads(self.request.body) - gcs_file_path = params['gcs_file_path'] - - token_id = params.get('upload_completion_token') - if token_id is not None: - token = upload_completion_token.Token.get_by_id(token_id) - upload_completion_token.Token.UpdateObjectState( - token, upload_completion_token.State.PROCESSING) - - try: - logging.debug('Loading %s', gcs_file_path) - gcs_file = cloudstorage.open( - gcs_file_path, 'r', retry_params=_RETRY_PARAMS) - with DecompressFileWrapper(gcs_file) as decompressing_file: - histogram_dicts = _LoadHistogramList(decompressing_file) - - gcs_file.close() - - ProcessHistogramSet(histogram_dicts, token) - finally: - cloudstorage.delete(gcs_file_path, retry_params=_RETRY_PARAMS) - - upload_completion_token.Token.UpdateObjectState( - token, upload_completion_token.State.COMPLETED) - - except Exception as e: # pylint: disable=broad-except - logging.error('Error processing histograms: %r', e.message) - self.response.out.write(json.dumps({'error': e.message})) - - upload_completion_token.Token.UpdateObjectState( - token, upload_completion_token.State.FAILED, e.message) - - -class AddHistogramsHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - self._CheckIsInternalUser() - - def _CreateUploadCompletionToken(self, temporary_staging_file_path=None): - token_info = { - 'token': str(uuid.uuid4()), - 'file': temporary_staging_file_path, - } - token = 
upload_completion_token.Token( - id=token_info['token'], - temporary_staging_file_path=temporary_staging_file_path, - ) - token.put() - logging.info('Upload completion token created. Token id: %s', - token_info['token']) - return token, token_info - - def Post(self): - if utils.IsDevAppserver(): - # Don't require developers to zip the body. - # In prod, the data will be written to cloud storage and processed on the - # taskqueue, so the caller will not see any errors. In dev_appserver, - # process the data immediately so the caller will see errors. - # Also always create upload completion token for such requests. - token, token_info = self._CreateUploadCompletionToken() - ProcessHistogramSet( - _LoadHistogramList(six.StringIO(self.request.body)), token) - token.UpdateState(upload_completion_token.State.COMPLETED) - return token_info - - with timing.WallTimeLogger('decompress'): - try: - data_str = self.request.body - - # Try to decompress at most 100 bytes from the data, only to determine - # if we've been given compressed payload. - zlib.decompressobj().decompress(data_str, 100) - logging.info('Received compressed data.') - except zlib.error: - data_str = self.request.get('data') - if not data_str: - raise api_request_handler.BadRequestError( - 'Missing or uncompressed data.') - data_str = zlib.compress(data_str) - logging.info('Received uncompressed data.') - - if not data_str: - raise api_request_handler.BadRequestError('Missing "data" parameter') - - filename = uuid.uuid4() - params = {'gcs_file_path': '/add-histograms-cache/%s' % filename} - - gcs_file = cloudstorage.open( - params['gcs_file_path'], - 'w', - content_type='application/octet-stream', - retry_params=_RETRY_PARAMS) - gcs_file.write(data_str) - gcs_file.close() - - _, token_info = self._CreateUploadCompletionToken(params['gcs_file_path']) - params['upload_completion_token'] = token_info['token'] - - retry_options = taskqueue.TaskRetryOptions( - task_retry_limit=_TASK_RETRY_LIMIT) - queue = taskqueue.Queue('default') - queue.add( - taskqueue.Task( - url='/add_histograms/process', - payload=json.dumps(params), - retry_options=retry_options)) - return token_info +def _CreateUploadCompletionToken(temporary_staging_file_path=None): + token_info = { + 'token': str(uuid.uuid4()), + 'file': temporary_staging_file_path, + } + token = upload_completion_token.Token( + id=token_info['token'], + temporary_staging_file_path=temporary_staging_file_path, + ) + token.put() + logging.info('Upload completion token created. Token id: %s', + token_info['token']) + return token, token_info def _LogDebugInfo(histograms): @@ -265,7 +225,7 @@ def _LogDebugInfo(histograms): logging.info('No LOG_URLS in data.') build_urls = hist.diagnostics.get(reserved_infos.BUILD_URLS.name) - if build_urls: + if build_urls and hasattr(build_urls, '__iter__'): build_urls = list(build_urls) msg = 'Build URL: %s' % str(build_urls) logging.info(msg) @@ -548,3 +508,59 @@ def _PurgeHistogramBinData(histograms): keys = list(dm.keys()) for k in keys: del dm[k] + + +def _CheckRequest(condition, msg): + if not condition: + raise api_request_handler.BadRequestError(msg) + + +class DecompressFileWrapper: + """A file-like object implementing inline decompression. + + This class wraps a file-like object and does chunk-based decoding of the data. + We only implement the read() function supporting fixed-chunk reading, capped + to a predefined constant buffer length. 
+ + Example + + with open('filename', 'r') as input: + decompressor = DecompressFileWrapper(input) + while True: + chunk = decompressor.read(4096) + if len(chunk) == 0: + break + // handle the chunk with size <= 4096 + """ + + def __init__(self, source_file, buffer_size=_ZLIB_BUFFER_SIZE): + self.source_file = source_file + self.decompressor = zlib.decompressobj() + self.buffer_size = buffer_size + + def __enter__(self): + return self + + def read(self, size=None): # pylint: disable=invalid-name + if size is None or size < 0: + size = self.buffer_size + + # We want to read chunks of data from the buffer, chunks at a time. + temporary_buffer = self.decompressor.unconsumed_tail + if len(temporary_buffer) < self.buffer_size / 2: + raw_buffer = self.source_file.read(size) + if raw_buffer: + temporary_buffer += raw_buffer + + if len(temporary_buffer) == 0: + return b'' + + decompressed_data = self.decompressor.decompress(temporary_buffer, size) + return decompressed_data + + def close(self): # pylint: disable=invalid-name + self.decompressor.flush() + + def __exit__(self, exception_type, exception_value, execution_traceback): + self.close() + return False diff --git a/dashboard/dashboard/add_histograms_queue.py b/dashboard/dashboard/add_histograms_queue.py index fe983828ec7..749b5482cc8 100644 --- a/dashboard/dashboard/add_histograms_queue.py +++ b/dashboard/dashboard/add_histograms_queue.py @@ -6,11 +6,12 @@ from __future__ import division from __future__ import absolute_import -import itertools import json import logging +import six import sys import uuid +from six.moves import zip_longest from google.appengine.ext import ndb @@ -21,7 +22,6 @@ from dashboard import sheriff_config_client from dashboard.common import datastore_hooks from dashboard.common import histogram_helpers -from dashboard.common import request_handler from dashboard.common import utils from dashboard.models import anomaly from dashboard.models import graph_data @@ -33,6 +33,9 @@ from tracing.value.diagnostics import diagnostic_ref from tracing.value.diagnostics import reserved_infos +from flask import request, make_response + + # Note: annotation names should shorter than add_point._MAX_COLUMN_NAME_LENGTH. DIAGNOSTIC_NAMES_TO_ANNOTATION_NAMES = { reserved_infos.CHROMIUM_COMMIT_POSITIONS.name: @@ -60,8 +63,6 @@ 'r_webrtc_arcturus_cl', reserved_infos.WEBRTC_INTERNAL_RIGEL_REVISIONS.name: 'r_webrtc_rigel_cl', - reserved_infos.WEBRTC_INTERNAL_CAPELLA_REVISIONS.name: - 'r_webrtc_capella_cl', reserved_infos.FUCHSIA_GARNET_REVISIONS.name: 'r_fuchsia_garnet_git', reserved_infos.FUCHSIA_PERIDOT_REVISIONS.name: @@ -84,73 +85,67 @@ def _CheckRequest(condition, msg): raise BadRequestError(msg) -class AddHistogramsQueueHandler(request_handler.RequestHandler): - """Request handler to process a histogram and add it to the datastore. +def AddHistogramsQueuePost(): + """Adds a single histogram or sparse shared diagnostic to the datastore. + + The |data| request parameter can be either a histogram or a sparse shared + diagnostic; the set of diagnostics that are considered sparse (meaning that + they don't normally change on every upload for a given benchmark from a + given bot) is shown in histogram_helpers.SPARSE_DIAGNOSTIC_TYPES. + + See https://goo.gl/lHzea6 for detailed information on the JSON format for + histograms and diagnostics. This request handler is intended to be used only by requests using the task queue; it shouldn't be directly from outside. + + Request parameters: + data: JSON encoding of a histogram or shared diagnostic. 
+ revision: a revision, given as an int. + test_path: the test path to which this diagnostic or histogram should be + attached. """ + datastore_hooks.SetPrivilegedRequest() + + params = json.loads(request.get_data()) - def get(self): - self.post() - - def post(self): - """Adds a single histogram or sparse shared diagnostic to the datastore. - - The |data| request parameter can be either a histogram or a sparse shared - diagnostic; the set of diagnostics that are considered sparse (meaning that - they don't normally change on every upload for a given benchmark from a - given bot) is shown in histogram_helpers.SPARSE_DIAGNOSTIC_TYPES. - - See https://goo.gl/lHzea6 for detailed information on the JSON format for - histograms and diagnostics. - - Request parameters: - data: JSON encoding of a histogram or shared diagnostic. - revision: a revision, given as an int. - test_path: the test path to which this diagnostic or histogram should be - attached. - """ - datastore_hooks.SetPrivilegedRequest() - - params = json.loads(self.request.body) - - _PrewarmGets(params) - - # Roughly, the processing of histograms and the processing of rows can be - # done in parallel since there are no dependencies. - - histogram_futures = [] - token_state_futures = [] - - try: - for p in params: - histogram_futures.append((p, _ProcessRowAndHistogram(p))) - except Exception as e: # pylint: disable=broad-except - for param, futures_info in itertools.izip_longest(params, - histogram_futures): - if futures_info is not None: - continue - token_state_futures.append( - upload_completion_token.Measurement.UpdateStateByPathAsync( - param.get('test_path'), param.get('token'), - upload_completion_token.State.FAILED, e.message)) - ndb.Future.wait_all(token_state_futures) - raise - - for info, futures in histogram_futures: - operation_state = upload_completion_token.State.COMPLETED - error_message = None - for f in futures: - exception = f.get_exception() - if exception is not None: - operation_state = upload_completion_token.State.FAILED - error_message = exception.message + _PrewarmGets(params) + + # Roughly, the processing of histograms and the processing of rows can be + # done in parallel since there are no dependencies. 
+ + histogram_futures = [] + token_state_futures = [] + + try: + for p in params: + histogram_futures.append((p, _ProcessRowAndHistogram(p))) + + except Exception as e: # pylint: disable=broad-except + for param, futures_info in zip_longest(params, histogram_futures): + if futures_info is not None: + continue token_state_futures.append( upload_completion_token.Measurement.UpdateStateByPathAsync( - info.get('test_path'), info.get('token'), operation_state, - error_message)) + param.get('test_path'), param.get('token'), + upload_completion_token.State.FAILED, str(e))) ndb.Future.wait_all(token_state_futures) + raise + + for info, futures in histogram_futures: + operation_state = upload_completion_token.State.COMPLETED + error_message = None + for f in futures: + exception = f.get_exception() + if exception is not None: + operation_state = upload_completion_token.State.FAILED + error_message = str(exception) + token_state_futures.append( + upload_completion_token.Measurement.UpdateStateByPathAsync( + info.get('test_path'), info.get('token'), operation_state, + error_message)) + ndb.Future.wait_all(token_state_futures) + return make_response('') def _GetStoryFromDiagnosticsDict(diagnostics): @@ -179,8 +174,8 @@ def _PrewarmGets(params): test_parts = path_parts[2:] test_key = '%s/%s' % (path_parts[0], path_parts[1]) - for p in test_parts: - test_key += '/%s' % p + for test_part in test_parts: + test_key += '/%s' % test_part keys.add(ndb.Key('TestMetadata', test_key)) ndb.get_multi_async(list(keys)) @@ -192,7 +187,9 @@ def _ProcessRowAndHistogram(params): benchmark_description = params['benchmark_description'] data_dict = params['data'] - logging.info('Processing: %s', test_path) + # Disable this log since it's killing the quota of Cloud Logging API - + # write requests per minute + # logging.info('Processing: %s', test_path) hist = histogram_module.Histogram.FromDict(data_dict) @@ -259,13 +256,18 @@ def _AddRowsFromData(params, revision, parent_test, legacy_parent_tests): test_key = parent_test.key stat_names_to_test_keys = {k: v.key for k, v in legacy_parent_tests.items()} - rows = CreateRowEntities(data_dict, test_key, stat_names_to_test_keys, - revision) - if not rows: + row, stat_name_row_dict = _CreateRowEntitiesInternal(data_dict, test_key, + stat_names_to_test_keys, + revision) + if not row: raise ndb.Return() + rows = [row] + if stat_name_row_dict: + rows.extend(stat_name_row_dict.values()) yield ndb.put_multi_async(rows) + [r.UpdateParentAsync() for r in rows] - logging.debug('Processed %s row entities.', len(rows)) + + logging.info('Added %s rows to Datastore', str(len(rows))) def IsMonitored(client, test): reason = [] @@ -275,7 +277,9 @@ def IsMonitored(client, test): if not test.has_rows: reason.append('has_rows') if reason: - logging.info('Skip test: %s reason=%s', test.key, ','.join(reason)) + # Disable this log since it's killing the quota of Cloud Logging API - + # write requests per minute + # logging.info('Skip test: %s reason=%s', test.key, ','.join(reason)) return False logging.info('Process test: %s', test.key) return True @@ -370,33 +374,49 @@ def GetUnitArgs(unit): unit_args['improvement_direction'] = anomaly.UNKNOWN return unit_args - def CreateRowEntities(histogram_dict, test_metadata_key, stat_names_to_test_keys, revision): + row, stat_name_row_dict = _CreateRowEntitiesInternal(histogram_dict, + test_metadata_key, + stat_names_to_test_keys, + revision) + + if not row: + return None + + rows = [row] + if stat_name_row_dict: + rows.extend(stat_name_row_dict.values()) + + 
return rows + + +def _CreateRowEntitiesInternal(histogram_dict, test_metadata_key, + stat_names_to_test_keys, revision): h = histogram_module.Histogram.FromDict(histogram_dict) # TODO(#3564): Move this check into _PopulateNumericalFields once we # know that it's okay to put rows that don't have a value/error. if h.num_values == 0: - return None - - rows = [] + return None, None row_dict = _MakeRowDict(revision, test_metadata_key.id(), h) - rows.append( - graph_data.Row( - id=revision, - parent=utils.GetTestContainerKey(test_metadata_key), - **add_point.GetAndValidateRowProperties(row_dict))) + parent_test_key = utils.GetTestContainerKey(test_metadata_key) + row = graph_data.Row( + id=revision, + parent=parent_test_key, + **add_point.GetAndValidateRowProperties(row_dict)) + stat_name_row_dict = {} for stat_name, suffixed_key in stat_names_to_test_keys.items(): + suffixed_parent_test_key = utils.GetTestContainerKey(suffixed_key) row_dict = _MakeRowDict(revision, suffixed_key.id(), h, stat_name=stat_name) - rows.append( - graph_data.Row( - id=revision, - parent=utils.GetTestContainerKey(suffixed_key), - **add_point.GetAndValidateRowProperties(row_dict))) + new_row = graph_data.Row( + id=revision, + parent=suffixed_parent_test_key, + **add_point.GetAndValidateRowProperties(row_dict)) + stat_name_row_dict[stat_name] = new_row - return rows + return row, stat_name_row_dict def _MakeRowDict(revision, test_path, tracing_histogram, stat_name=None): @@ -417,6 +437,25 @@ def _MakeRowDict(revision, test_path, tracing_histogram, stat_name=None): if trace_url_set and not is_summary: d['supplemental_columns']['a_tracing_uri'] = list(trace_url_set)[-1] + try: + bot_id_name = tracing_histogram.diagnostics.get( + reserved_infos.BOT_ID.name) + if bot_id_name: + bot_id_names = list(bot_id_name) + d['supplemental_columns']['a_bot_id'] = bot_id_names + if len(bot_id_names) == 1: + d['swarming_bot_id'] = bot_id_names[0] + + except Exception as e: # pylint: disable=broad-except + logging.warning('bot_id failed. Error: %s', e) + try: + os_detail_vers = tracing_histogram.diagnostics.get( + reserved_infos.OS_DETAILED_VERSIONS.name) + if os_detail_vers: + d['supplemental_columns']['a_os_detail_vers'] = list(os_detail_vers) + except Exception as e: # pylint: disable=broad-except + logging.warning('os_detail_vers failed. 
Error: %s', e) + for diag_name, annotation in DIAGNOSTIC_NAMES_TO_ANNOTATION_NAMES.items(): revision_info = tracing_histogram.diagnostics.get(diag_name) if not revision_info: @@ -465,7 +504,7 @@ def _AddStdioUri(name, link_list, row_dict): if isinstance(link_list, list): row_dict['supplemental_columns'][name] = '[%s](%s)' % tuple(link_list) # Support busted format until infra changes roll - elif isinstance(link_list, basestring): + elif isinstance(link_list, six.string_types): row_dict['supplemental_columns'][name] = link_list diff --git a/dashboard/dashboard/add_histograms_queue_test.py b/dashboard/dashboard/add_histograms_queue_test.py index fdc7cb4caee..23976a2baf4 100644 --- a/dashboard/dashboard/add_histograms_queue_test.py +++ b/dashboard/dashboard/add_histograms_queue_test.py @@ -7,11 +7,11 @@ from __future__ import absolute_import import copy +from flask import Flask import json import mock import sys import uuid -import webapp2 import webtest from google.appengine.ext import ndb @@ -75,16 +75,21 @@ 'type': 'GenericSet' } +flask_app = Flask(__name__) + +@flask_app.route('/add_histograms_queue', methods=['GET', 'POST']) +def AddHistogramsQueuePost(): + return add_histograms_queue.AddHistogramsQueuePost() + + +@mock.patch('dashboard.common.cloud_metric._PublishTSCloudMetric', + mock.MagicMock()) class AddHistogramsQueueTest(testing_common.TestCase): def setUp(self): - super(AddHistogramsQueueTest, self).setUp() - app = webapp2.WSGIApplication([ - ('/add_histograms_queue', - add_histograms_queue.AddHistogramsQueueHandler) - ]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) self.SetCurrentUser('foo@bar.com', is_admin=True) def testPostHistogram(self): @@ -153,6 +158,57 @@ def testPostHistogram_Internal(self): rows = graph_data.Row.query().fetch() self.assertEqual(7, len(rows)) + self.assertEqual(None, rows[0].swarming_bot_id) + + def testPostHistogram_Internal_swarmingBotId(self): + test_path = 'Chromium/win7/suite/metric' + h2 = copy.deepcopy(TEST_HISTOGRAM) + h2['diagnostics'][reserved_infos.BOT_ID.name] = { + 'type': 'GenericSet', + 'values': ['swarming-bot-id-0'], + } + + params = [{ + 'data': h2, + 'test_path': test_path, + 'benchmark_description': None, + 'revision': 123 + }] + self.testapp.post('/add_histograms_queue', json.dumps(params)) + + histograms = histogram.Histogram.query().fetch() + self.assertEqual(1, len(histograms)) + + rows = graph_data.Row.query().fetch() + self.assertEqual(7, len(rows)) + self.assertEqual('swarming-bot-id-0', rows[0].swarming_bot_id) + + def testPostHistogram_Internal_invalidSwarmingBotId(self): + test_path = 'Chromium/win7/suite/metric' + h2 = copy.deepcopy(TEST_HISTOGRAM) + h2['diagnostics'][reserved_infos.BOT_ID.name] = { + 'type': + 'GenericSet', + 'values': [ + 'swarming-bot-id-0', 'swarming-bot-id-1', 'swarming-bot-id-2', + 'swarming-bot-id-3' + ], + } + + params = [{ + 'data': h2, + 'test_path': test_path, + 'benchmark_description': None, + 'revision': 123 + }] + self.testapp.post('/add_histograms_queue', json.dumps(params)) + + histograms = histogram.Histogram.query().fetch() + self.assertEqual(1, len(histograms)) + + rows = graph_data.Row.query().fetch() + self.assertEqual(7, len(rows)) + self.assertEqual(None, rows[0].swarming_bot_id) def testPostHistogram_WithFreshDiagnostics(self): graph_data.Bot( @@ -397,15 +453,15 @@ def testPostHistogram_EmptyCreatesNoTestsOrRowsOrHistograms(self): def testGetUnitArgs_Up(self): unit_args = add_histograms_queue.GetUnitArgs('count_biggerIsBetter') 
- self.assertEquals(anomaly.UP, unit_args['improvement_direction']) + self.assertEqual(anomaly.UP, unit_args['improvement_direction']) def testGetUnitArgs_Down(self): unit_args = add_histograms_queue.GetUnitArgs('count_smallerIsBetter') - self.assertEquals(anomaly.DOWN, unit_args['improvement_direction']) + self.assertEqual(anomaly.DOWN, unit_args['improvement_direction']) def testGetUnitArgs_Unknown(self): unit_args = add_histograms_queue.GetUnitArgs('count') - self.assertEquals(anomaly.UNKNOWN, unit_args['improvement_direction']) + self.assertEqual(anomaly.UNKNOWN, unit_args['improvement_direction']) def testCreateRowEntities(self): test_path = 'Chromium/win7/suite/metric' @@ -615,15 +671,13 @@ def testCreateRowEntities_DoesNotAddTraceUriIfDiagnosticIsEmpty(self): mock.MagicMock(return_value=None)) @mock.patch.object(SheriffConfigClient, 'Match', mock.MagicMock(return_value=([], None))) +@mock.patch('dashboard.common.cloud_metric._PublishTSCloudMetric', + mock.MagicMock()) class AddHistogramsQueueTestWithUploadCompletionToken(testing_common.TestCase): def setUp(self): - super(AddHistogramsQueueTestWithUploadCompletionToken, self).setUp() - app = webapp2.WSGIApplication([ - ('/add_histograms_queue', - add_histograms_queue.AddHistogramsQueueHandler) - ]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetIsInternalUser('foo@bar.com', True) self.SetCurrentUser('foo@bar.com') diff --git a/dashboard/dashboard/add_histograms_test.py b/dashboard/dashboard/add_histograms_test.py index 8a409fd6b40..97544ade2be 100644 --- a/dashboard/dashboard/add_histograms_test.py +++ b/dashboard/dashboard/add_histograms_test.py @@ -7,13 +7,15 @@ from __future__ import absolute_import import base64 +from flask import Flask import itertools import json import mock import random +import six import string import sys -import webapp2 +import unittest import webtest import zlib @@ -122,28 +124,28 @@ def _CreateHistogram(name='hist', return histograms -class BufferedFakeFile(object): +class BufferedFakeFile: - def __init__(self, data=str()): + def __init__(self, data=b''): self.data = data self.position = 0 def read(self, size=None): # pylint: disable=invalid-name if self.position == len(self.data): - return '' + return b'' if size is None or size < 0: result = self.data[self.position:] self.position = len(self.data) - return result + return six.ensure_binary(result) if size > len(self.data) + self.position: result = self.data[self.position:] self.position = len(self.data) - return result + return six.ensure_binary(result) current_position = self.position self.position += size result = self.data[current_position:self.position] - return result + return six.ensure_binary(result) def write(self, data): # pylint: disable=invalid-name self.data += data @@ -160,18 +162,34 @@ def __enter__(self): return self +flask_app = Flask(__name__) + + +@flask_app.route('/add_histograms', methods=['POST']) +def AddHistogramsPost(): + return add_histograms.AddHistogramsPost() + + +@flask_app.route('/add_histograms/process', methods=['POST']) +def AddHistogramsProcessPost(): + return add_histograms.AddHistogramsProcessPost() + + +@flask_app.route('/add_histograms_queue', methods=['GET', 'POST']) +def AddHistogramsQueuePost(): + return add_histograms_queue.AddHistogramsQueuePost() + + +@flask_app.route('/uploads/') +def UploadsInfoGet(token_id): + return uploads_info.UploadsInfoGet(token_id) + + class AddHistogramsBaseTest(testing_common.TestCase): def setUp(self): - 
super(AddHistogramsBaseTest, self).setUp() - app = webapp2.WSGIApplication([ - ('/add_histograms', add_histograms.AddHistogramsHandler), - ('/add_histograms/process', add_histograms.AddHistogramsProcessHandler), - ('/add_histograms_queue', - add_histograms_queue.AddHistogramsQueueHandler), - ('/uploads/(.+)', uploads_info.UploadInfoHandler), - ]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetIsInternalUser('foo@bar.com', True) self.SetCurrentUser('foo@bar.com', is_admin=True) oauth_patcher = mock.patch.object(api_auth, 'oauth') @@ -200,6 +218,7 @@ def PostAddHistogram(self, data, status=200): return r def PostAddHistogramProcess(self, data): + data = six.ensure_binary(data) mock_read = mock.MagicMock(wraps=BufferedFakeFile(zlib.compress(data))) self.mock_cloudstorage.open.return_value = mock_read @@ -217,11 +236,10 @@ def PostAddHistogramProcess(self, data): mock.MagicMock(return_value=None)) @mock.patch.object(SheriffConfigClient, 'Match', mock.MagicMock(return_value=([], None))) +@mock.patch('dashboard.services.skia_bridge_service.SkiaServiceClient', + mock.MagicMock()) class AddHistogramsEndToEndTest(AddHistogramsBaseTest): - def setUp(self): - super(AddHistogramsEndToEndTest, self).setUp() - @mock.patch.object(add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync') @mock.patch.object(add_histograms_queue.find_anomalies, 'ProcessTestsAsync') @@ -272,7 +290,7 @@ def testPost_ZlibSucceeds(self, mock_process_test, mock_graph_revisions): commit_position=123, benchmark_description='Benchmark description.', samples=[1, 2, 3]) - data = zlib.compress(json.dumps(hs.AsDicts())) + data = zlib.compress(six.ensure_binary(json.dumps(hs.AsDicts()))) self.PostAddHistogram(data) self.ExecuteTaskQueueTasks('/add_histograms_queue', @@ -309,7 +327,7 @@ def testPost_BuildUrls_Added(self): benchmark_description='Benchmark description.', samples=[1, 2, 3], build_url='http://foo') - data = zlib.compress(json.dumps(hs.AsDicts())) + data = zlib.compress(six.ensure_binary(json.dumps(hs.AsDicts()))) self.PostAddHistogram(data) self.ExecuteTaskQueueTasks('/add_histograms_queue', @@ -379,7 +397,7 @@ def testPost_IllegalMasterName_Fails(self): data = json.dumps(hs.AsDicts()) response = self.PostAddHistogramProcess(data) - self.assertIn('Illegal slash', response.body) + self.assertIn(b'Illegal slash', response.body) def testPost_IllegalBotName_Fails(self): hs = _CreateHistogram( @@ -387,7 +405,7 @@ def testPost_IllegalBotName_Fails(self): data = json.dumps(hs.AsDicts()) response = self.PostAddHistogramProcess(data) - self.assertIn('Illegal slash', response.body) + self.assertIn(b'Illegal slash', response.body) def testPost_IllegalSuiteName_Fails(self): hs = _CreateHistogram( @@ -395,7 +413,7 @@ def testPost_IllegalSuiteName_Fails(self): data = json.dumps(hs.AsDicts()) response = self.PostAddHistogramProcess(data) - self.assertIn('Illegal slash', response.body) + self.assertIn(b'Illegal slash', response.body) def testPost_DuplicateHistogram_Fails(self): hs1 = _CreateHistogram( @@ -406,7 +424,7 @@ def testPost_DuplicateHistogram_Fails(self): data = json.dumps(hs.AsDicts()) response = self.PostAddHistogramProcess(data) - self.assertIn('Duplicate histogram detected', response.body) + self.assertIn(b'Duplicate histogram detected', response.body) @mock.patch.object(add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync') @@ -488,6 +506,10 @@ def testPost_TestNameEndsContainsButDoesntEndWithRef_ProcessTestIsCalled( 
add_histograms.TASK_QUEUE_NAME) self.assertTrue(mock_process_test.called) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') @mock.patch.object(add_histograms_queue.graph_revisions, 'AddRowsToCacheAsync', mock.MagicMock()) @mock.patch.object(add_histograms_queue.find_anomalies, 'ProcessTestsAsync', @@ -792,6 +814,10 @@ def _CheckOutOfOrderExpectations(self, expected): for k in expected.keys(): self.assertFalse(expected[k]) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') def testPost_OutOfOrder_SuiteLevel(self): self._AddAtCommit(1, 'd1', 'o1') self._AddAtCommit(10, 'd1', 'o1') @@ -806,6 +832,10 @@ def testPost_OutOfOrder_SuiteLevel(self): } self._CheckOutOfOrderExpectations(expected) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') def testPost_OutOfOrder_HistogramLevel(self): self._AddAtCommit(1, 'd1', 'o1') self._AddAtCommit(10, 'd1', 'o1') @@ -827,9 +857,6 @@ def testPost_OutOfOrder_HistogramLevel(self): mock.MagicMock(return_value=([], None))) class AddHistogramsTest(AddHistogramsBaseTest): - def setUp(self): - super(AddHistogramsTest, self).setUp() - def TaskParams(self): tasks = self.GetTaskQueueTasks(add_histograms.TASK_QUEUE_NAME) params = [] @@ -1603,11 +1630,12 @@ def testLogDebugInfo_NoLogUrls(self, mock_log): mock.MagicMock(return_value=None)) @mock.patch.object(SheriffConfigClient, 'Match', mock.MagicMock(return_value=([], None))) +@mock.patch('dashboard.services.skia_bridge_service.SkiaServiceClient', + mock.MagicMock()) class AddHistogramsUploadCompleteonTokenTest(AddHistogramsBaseTest): def setUp(self): - super(AddHistogramsUploadCompleteonTokenTest, self).setUp() - + super().setUp() self._TrunOnUploadCompletionTokenExperiment() hs = _CreateHistogram( master='master', @@ -1784,6 +1812,9 @@ def testPost_MonitoredMeasurementSucceeds(self): self.assertEqual(len(measurements), 1) self.assertEqual(measurements[0].monitored, True) + # (crbug/1298177) The setup for Flask is not ready yet. We will force the test + # to run in the old setup for now. + @unittest.skipIf(six.PY3, 'DevAppserver not ready yet for python 3.') @mock.patch.object(utils, 'IsDevAppserver', mock.MagicMock(return_value=True)) def testPost_DevAppserverSucceeds(self): token_info = self.PostAddHistogram(self.histogram_data) @@ -1843,6 +1874,10 @@ def testPostLogs_AddHistogramProcessFails(self, mock_log): ] mock_log.assert_has_calls(log_calls, any_order=True) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') def testFullCycle_Success(self): token_info = self.PostAddHistogram({'data': self.histogram_data}) @@ -1877,6 +1912,10 @@ def testFullCycle_Success(self): self.assertEqual(measurement['state'], 'COMPLETED') self.assertEqual(len(measurement['dimensions']), 5) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. 
+ ''') @mock.patch.object(add_histograms_queue.find_anomalies, 'ProcessTestsAsync', mock.MagicMock(side_effect=Exception('Test error'))) def testFullCycle_MeasurementFails(self): @@ -1895,7 +1934,7 @@ def testFullCycle_MeasurementFails(self): def RandomChars(length): for _ in itertools.islice(itertools.count(0), length): - yield '%s' % (random.choice(string.letters)) + yield '%s' % (random.choice(string.ascii_letters)) class DecompressFileWrapperTest(testing_common.TestCase): @@ -1903,14 +1942,14 @@ class DecompressFileWrapperTest(testing_common.TestCase): def testBasic(self): filesize = 1024 * 256 random.seed(1) - payload = ''.join([x for x in RandomChars(filesize)]) + payload = ''.join(list(RandomChars(filesize))) random.seed(None) self.assertEqual(len(payload), filesize) - input_file = BufferedFakeFile(zlib.compress(payload)) + input_file = BufferedFakeFile(zlib.compress(six.ensure_binary(payload))) retrieved_payload = str() with add_histograms.DecompressFileWrapper(input_file, 2048) as decompressor: while True: - chunk = decompressor.read(1024) + chunk = six.ensure_str(decompressor.read(1024)) if len(chunk) == 0: break retrieved_payload += chunk @@ -1919,7 +1958,7 @@ def testBasic(self): def testDecompressionFail(self): filesize = 1024 * 256 random.seed(1) - payload = ''.join([x for x in RandomChars(filesize)]) + payload = ''.join(list(RandomChars(filesize))) random.seed(None) self.assertEqual(len(payload), filesize) @@ -1929,7 +1968,7 @@ def testDecompressionFail(self): with self.assertRaises(zlib.error): with add_histograms.DecompressFileWrapper(input_file, 2048) as d: while True: - chunk = d.read(1024) + chunk = six.ensure_str(d.read(1024)) if len(chunk) == 0: break retrieved_payload += chunk @@ -1960,7 +1999,7 @@ def _MakeHistogram(name): reserved_infos.DEVICE_IDS.name, generic_set.GenericSet(['device_foo'])) input_file_compressed = BufferedFakeFile( - zlib.compress(json.dumps(histograms.AsDicts()))) + zlib.compress(six.ensure_binary(json.dumps(histograms.AsDicts())))) input_file_raw = BufferedFakeFile(json.dumps(histograms.AsDicts())) loaded_compressed_histograms = histogram_set.HistogramSet() @@ -1977,9 +2016,9 @@ def _MakeHistogram(name): add_histograms._LoadHistogramList(input_file_raw)) loaded_raw_histograms.DeduplicateDiagnostics() - self.assertEquals( - sorted(loaded_raw_histograms.AsDicts()), - sorted(loaded_compressed_histograms.AsDicts())) + raw_dicts = loaded_raw_histograms.AsDicts() + compressed_dicts = loaded_compressed_histograms.AsDicts() + self.assertCountEqual(raw_dicts, compressed_dicts) def testJSONFail(self): with BufferedFakeFile('Not JSON') as input_file: diff --git a/dashboard/dashboard/add_point.py b/dashboard/dashboard/add_point.py index 5e0020f7297..c0b7f155793 100644 --- a/dashboard/dashboard/add_point.py +++ b/dashboard/dashboard/add_point.py @@ -11,6 +11,7 @@ import logging import math import re +import six from google.appengine.api import datastore_errors from google.appengine.api import taskqueue @@ -23,6 +24,8 @@ from dashboard.common import math_utils from dashboard.models import graph_data +from flask import request, make_response + _TASK_QUEUE_NAME = 'new-points-queue' # Number of rows to process per task queue task. 
This limits the task size @@ -46,137 +49,131 @@ class BadRequestError(Exception): """An error indicating that a 400 response status should be returned.""" - pass - - -class AddPointHandler(request_handler.RequestHandler): - """URL endpoint to post data to the dashboard.""" - - def post(self): - """Validates data parameter and add task to queue to process points. - The row data comes from a "data" parameter, which is a JSON encoding of a - list of dictionaries, each of which represents one performance result - (one point in a graph) and associated data. - [ - { - "master": "ChromiumPerf", - "bot": "xp-release-dual-core", - "test": "dromaeo/dom/modify", - "revision": 123456789, - "value": 24.66, - "error": 2.33, - "units": "ms", - "supplemental_columns": { - "d_median": 24234.12, - "d_mean": 23.553, - "r_webkit": 423340, - ... - }, +def AddPointPost(): + """Validates data parameter and add task to queue to process points. + + The row data comes from a "data" parameter, which is a JSON encoding of a + list of dictionaries, each of which represents one performance result + (one point in a graph) and associated data. + + [ + { + "master": "ChromiumPerf", + "bot": "xp-release-dual-core", + "test": "dromaeo/dom/modify", + "revision": 123456789, + "value": 24.66, + "error": 2.33, + "units": "ms", + "supplemental_columns": { + "d_median": 24234.12, + "d_mean": 23.553, + "r_webkit": 423340, ... }, ... - ] - - In general, the required fields are "master", "bot", "test" (which together - form the test path which identifies the series that this point belongs to), - and "revision" and "value", which are the X and Y values for the point. - - This API also supports the Dashboard JSON v1.0 format (go/telemetry-json), - the first producer of which is Telemetry. Telemetry provides lightweight - serialization of values it produces, as JSON. If a dashboard JSON object is - passed, it will be a single dict rather than a list, with the test, - value, error, and units fields replaced by a chart_data field containing a - Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is - processed by converting it into rows (which can be viewed as Dashboard JSON - v0). - - { - "master": "ChromiumPerf", - , - "chart_data": { - "foo": { - "bar": { - "type": "scalar", - "name": "foo.bar", - "units": "ms", - "value": 4.2, - }, - "summary": { - "type": "list_of_scalar_values", - "name": "foo", - "units": "ms", - "values": [4.2, 5.7, 6.8], - "std": 1.30512, - }, }, - } - - Request parameters: - data: JSON encoding of a list of dictionaries. - - Outputs: - Empty 200 response with if successful, - 200 response with warning message if optional data is invalid, - 403 response with error message if sender IP is not white-listed, - 400 response with error message if required data is invalid. - 500 with error message otherwise. - """ - datastore_hooks.SetPrivilegedRequest() - try: - api_auth.Authorize() - except api_auth.ApiAuthException as error: - logging.error('Auth error: %s', error) - self.ReportError('User unauthorized.', 403) - return + ... + ] - data_str = self.request.get('data') - if not data_str: - self.ReportError('Missing "data" parameter.', status=400) - return + In general, the required fields are "master", "bot", "test" (which together + form the test path which identifies the series that this point belongs to), + and "revision" and "value", which are the X and Y values for the point. 
+ + This API also supports the Dashboard JSON v1.0 format (go/telemetry-json), + the first producer of which is Telemetry. Telemetry provides lightweight + serialization of values it produces, as JSON. If a dashboard JSON object is + passed, it will be a single dict rather than a list, with the test, + value, error, and units fields replaced by a chart_data field containing a + Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is + processed by converting it into rows (which can be viewed as Dashboard JSON + v0). + + { + "master": "ChromiumPerf", + , + "chart_data": { + "foo": { + "bar": { + "type": "scalar", + "name": "foo.bar", + "units": "ms", + "value": 4.2, + }, + "summary": { + "type": "list_of_scalar_values", + "name": "foo", + "units": "ms", + "values": [4.2, 5.7, 6.8], + "std": 1.30512, + }, + }, + } - self.AddData(data_str) + Request parameters: + data: JSON encoding of a list of dictionaries. - def AddData(self, data_str): - try: - data = json.loads(data_str) - except ValueError: - self.ReportError('Invalid JSON string.', status=400) - return + Outputs: + Empty 200 response with if successful, + 200 response with warning message if optional data is invalid, + 403 response with error message if sender IP is not white-listed, + 400 response with error message if required data is invalid. + 500 with error message otherwise. + """ + datastore_hooks.SetPrivilegedRequest() + try: + api_auth.Authorize() + except api_auth.ApiAuthException as error: + logging.error('Auth error: %s', error) + return request_handler.RequestHandlerReportError('User unauthorized.', 403) - logging.info('Received data: %s', data) + data_str = request.values.get('data') + if not data_str: + return request_handler.RequestHandlerReportError( + 'Missing "data" parameter.', status=400) - try: - if isinstance(data, dict): - if data.get('chart_data'): - data = _DashboardJsonToRawRows(data) - if not data: - return # No data to add, bail out. - else: - self.ReportError( - 'Data should be a list of rows or a Dashboard JSON v1.0 dict.', - status=400) - return - - if data: - # We only need to validate the row ID for one point, since all points - # being handled by this upload should have the same row ID. - last_added_entity = _GetLastAddedEntityForRow(data[0]) - _ValidateRowId(data[0], last_added_entity) - - for row_dict in data: - ValidateRowDict(row_dict) - _AddTasks(data) - except BadRequestError as error: - # If any of the data was invalid, abort immediately and return an error. - self.ReportError(error.message, status=400) + return AddData(data_str) + + +def AddData(data_str): + try: + data = json.loads(data_str) + except ValueError: + return request_handler.RequestHandlerReportError( + 'Invalid JSON string.', status=400) + + try: + if isinstance(data, dict): + if data.get('chart_data'): + data = _DashboardJsonToRawRows(data) + if not data: + return make_response('') # No data to add, bail out. + else: + return request_handler.RequestHandlerReportError( + 'Data should be a list of rows or a Dashboard JSON v1.0 dict.', + status=400) + + if data: + # We only need to validate the row ID for one point, since all points + # being handled by this upload should have the same row ID. + last_added_entity = _GetLastAddedEntityForRow(data[0]) + _ValidateRowId(data[0], last_added_entity) + + for row_dict in data: + ValidateRowDict(row_dict) + _AddTasks(data) + return make_response('') + except BadRequestError as e: + # If any of the data was invalid, abort immediately and return an error. 
+ return request_handler.RequestHandlerReportError(str(e), status=400) def _ValidateNameString(value, name): if not value: raise BadRequestError('No %s name given.' % name) - if not isinstance(value, basestring): + if not isinstance(value, six.string_types): raise BadRequestError('Error: %s must be a string' % name) if '/' in value: raise BadRequestError('Illegal slash in %s' % name) @@ -275,7 +272,8 @@ def _TestSuiteName(dash_json_dict): try: name = dash_json_dict['chart_data']['benchmark_name'] except KeyError as e: - raise BadRequestError('Could not find test suite name. ' + e.message) + six.raise_from( + BadRequestError('Could not find test suite name. ' + str(e)), e) _ValidateNameString(name, 'test_suite_name') @@ -413,11 +411,15 @@ def _FlattenTrace(test_suite_name, elif trace_name != 'summary' and is_ref: name += '_ref' + units = trace.get('units') + if units is None: + raise BadRequestError('Units must be specified in the chart data') + row_dict = { 'test': name, 'value': value, 'error': error, - 'units': trace['units'], + 'units': units, 'tracing_uri': tracing_uri, 'benchmark_description': benchmark_description, } @@ -457,8 +459,9 @@ def _ExtractValueAndError(trace): return float('nan'), 0 try: return float(value), 0 - except: - raise BadRequestError('Expected scalar value, got: %r' % value) + except Exception as e: # pylint: disable=broad-except + six.raise_from( + BadRequestError('Expected scalar value, got: %r' % value), e) if trace_type == 'list_of_scalar_values': values = trace.get('values') @@ -488,7 +491,7 @@ def _ExtractValueAndError(trace): def _IsNumber(v): - return isinstance(v, float) or isinstance(v, int) or isinstance(v, int) + return isinstance(v, (float, int)) def _GeomMeanAndStdDevFromHistogram(histogram): @@ -551,11 +554,10 @@ def _ImprovementDirectionToHigherIsBetter(improvement_direction_str): # TODO(eakuefner): Fail instead of falling back after fixing crbug.com/459450. 
if improvement_direction_str == 'up': return True - elif improvement_direction_str == 'down': + if improvement_direction_str == 'down': return False - else: - raise BadRequestError('Invalid improvement direction string: ' + - improvement_direction_str) + raise BadRequestError('Invalid improvement direction string: ' + + improvement_direction_str) def _GetLastAddedEntityForRow(row): @@ -568,7 +570,7 @@ def _GetLastAddedEntityForRow(row): try: last_added_revision_entity = ndb.Key('LastAddedRevision', path).get() except datastore_errors.BadRequestError: - logging.warn('Datastore BadRequestError when getting %s', path) + logging.warning('Datastore BadRequestError when getting %s', path) return None return last_added_revision_entity @@ -714,8 +716,9 @@ def GetAndValidateRowId(row_dict): raise BadRequestError('Required field "revision" missing.') try: return int(row_dict['revision']) - except (ValueError, TypeError): - raise BadRequestError('Bad value for "revision", should be numerical.') + except (ValueError, TypeError) as e: + raise BadRequestError( + 'Bad value for "revision", should be numerical.') from e def GetAndValidateRowProperties(row): @@ -747,15 +750,21 @@ def GetAndValidateRowProperties(row): raise BadRequestError('No "value" given.') try: columns['value'] = float(row['value']) - except (ValueError, TypeError): - raise BadRequestError('Bad value for "value", should be numerical.') + except (ValueError, TypeError) as e: + six.raise_from( + BadRequestError('Bad value for "value", should be numerical.'), e) if 'error' in row: try: error = float(row['error']) columns['error'] = error except (ValueError, TypeError): - logging.warn('Bad value for "error".') - + logging.warning('Bad value for "error".') + if 'swarming_bot_id' in row: + try: + swarming_bot_id = str(row['swarming_bot_id']) + columns['swarming_bot_id'] = swarming_bot_id + except (ValueError, TypeError): + logging.warning('Bad value for "swarming_bot_id".') columns.update(_GetSupplementalColumns(row)) return columns @@ -782,7 +791,7 @@ def _GetSupplementalColumns(row): for (name, value) in row.get('supplemental_columns', {}).items(): # Don't allow too many columns if len(columns) == _MAX_NUM_COLUMNS: - logging.warn('Too many columns, some being dropped.') + logging.warning('Too many columns, some being dropped.') break value = _CheckSupplementalColumn(name, value) if value: @@ -795,12 +804,12 @@ def _CheckSupplementalColumn(name, value): # Check length of column name. name = str(name) if len(name) > _MAX_COLUMN_NAME_LENGTH: - logging.warn('Supplemental column name too long.') + logging.warning('Supplemental column name too long.') return None # The column name has a prefix which indicates type of value. if name[:2] not in ('d_', 'r_', 'a_'): - logging.warn('Bad column name "%s", invalid prefix.', name) + logging.warning('Bad column name "%s", invalid prefix.', name) return None # The d_ prefix means "data column", intended to hold numbers. 
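As a concrete reference for the naming scheme these checks enforce, here is a small, hypothetical supplemental_columns payload; the specific names and values are illustrative only, but they follow the d_/r_/a_ prefix semantics applied by _CheckSupplementalColumn and echo the sample payload in the AddPointPost docstring above.

# Illustrative sketch only; these particular columns are hypothetical.
supplemental_columns = {
    'd_median': 24234.12,   # d_ prefix: numeric data column, coerced via float()
    'r_webkit': 423340,     # r_ prefix: revision; must match a revision pattern
    'a_tracing_uri': 'https://example.com/trace.html',  # a_ prefix: short annotation
}

Columns whose names do not carry one of these three prefixes are dropped with a warning rather than stored, as the prefix check above shows.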
@@ -808,7 +817,7 @@ def _CheckSupplementalColumn(name, value): try: value = float(value) except (ValueError, TypeError): - logging.warn('Bad value for column "%s", should be numerical.', name) + logging.warning('Bad value for column "%s", should be numerical.', name) return None # The r_ prefix means "revision", and the value should look like a number, @@ -821,15 +830,25 @@ def _CheckSupplementalColumn(name, value): ] if (not value or len(str(value)) > _STRING_COLUMN_MAX_LENGTH or not any(re.match(p, str(value)) for p in revision_patterns)): - logging.warn('Bad value for revision column "%s". Value: %s', name, value) + logging.warning('Bad value for revision column "%s". Value: %s', name, + value) return None value = str(value) if name.startswith('a_'): - # Annotation column, should be a short string. + # Annotation column, is typically a short string. + # Bot_ID lists can be long, truncate if exceed max length if len(str(value)) > _STRING_COLUMN_MAX_LENGTH: - logging.warn('Value for "%s" too long, max length is %d.', name, - _STRING_COLUMN_MAX_LENGTH) - return None + logging.warning('Value for "%s" too long, truncated to max length %d.', + name, + _STRING_COLUMN_MAX_LENGTH) + if isinstance(value, list): + while len(str(value)) > _STRING_COLUMN_MAX_LENGTH: + value.pop() + elif isinstance(value, str): + value = value[:_STRING_COLUMN_MAX_LENGTH] + else: + logging.warning('Value for "%s" is not truncatable', name) + return None return value diff --git a/dashboard/dashboard/add_point_queue.py b/dashboard/dashboard/add_point_queue.py index 38f29cebe73..3240665a6a8 100644 --- a/dashboard/dashboard/add_point_queue.py +++ b/dashboard/dashboard/add_point_queue.py @@ -18,79 +18,79 @@ from dashboard import units_to_direction from dashboard import sheriff_config_client from dashboard.common import datastore_hooks -from dashboard.common import request_handler from dashboard.common import utils from dashboard.models import anomaly from dashboard.models import graph_data +from flask import request, make_response -class AddPointQueueHandler(request_handler.RequestHandler): - """Request handler to process points and add them to the datastore. - This request handler is intended to be used only by requests using the - task queue; it shouldn't be directly from outside. - """ +def AddPointQueuePost(): + """Adds a set of points from the post data. - def get(self): - """A get request is the same a post request for this endpoint.""" - self.post() - - def post(self): - """Adds a set of points from the post data. - - Request parameters: - data: JSON encoding of a list of dictionaries. Each dictionary represents - one point to add. For each dict, one Row entity will be added, and - any required TestMetadata or Master or Bot entities will be created. 
- """ - datastore_hooks.SetPrivilegedRequest() - - data = json.loads(self.request.get('data')) - _PrewarmGets(data) - - all_put_futures = [] - added_rows = [] - parent_tests = [] - for row_dict in data: - try: - new_row, parent_test, put_futures = _AddRow(row_dict) - added_rows.append(new_row) - parent_tests.append(parent_test) - all_put_futures.extend(put_futures) - - except add_point.BadRequestError as e: - logging.error('Could not add %s, it was invalid.', e.message) - except datastore_errors.BadRequestError as e: - logging.info('While trying to store %s', row_dict) - logging.error('Datastore request failed: %s.', e.message) - return - - ndb.Future.wait_all(all_put_futures) - - client = sheriff_config_client.GetSheriffConfigClient() - tests_keys = [] - for t in parent_tests: - reason = [] - subscriptions, _ = client.Match(t.test_path, check=True) - if not subscriptions: - reason.append('subscriptions') - if not t.has_rows: - reason.append('has_rows') - if IsRefBuild(t.key): - reason.append('RefBuild') - if reason: - logging.info('Skip test: %s reason=%s', t.key, ','.join(reason)) - continue - logging.info('Process test: %s', t.key) - tests_keys.append(t.key) - - # Updating of the cached graph revisions should happen after put because - # it requires the new row to have a timestamp, which happens upon put. - futures = [ - graph_revisions.AddRowsToCacheAsync(added_rows), - find_anomalies.ProcessTestsAsync(tests_keys) - ] - ndb.Future.wait_all(futures) + Request parameters: + data: JSON encoding of a list of dictionaries. Each dictionary represents + one point to add. For each dict, one Row entity will be added, and + any required TestMetadata or Master or Bot entities will be created. + """ + datastore_hooks.SetPrivilegedRequest() + + data = json.loads(request.values.get('data')) + _PrewarmGets(data) + + all_put_futures = [] + added_rows = [] + parent_tests = [] + + for row_dict in data: + try: + new_row, parent_test, put_futures = _AddRow(row_dict) + + added_rows.append(new_row) + parent_tests.append(parent_test) + all_put_futures.extend(put_futures) + + except add_point.BadRequestError as e: + logging.error('Could not add %s, it was invalid.', str(e)) + except datastore_errors.BadRequestError as e: + logging.info('While trying to store %s', row_dict) + logging.error('Datastore request failed: %s.', str(e)) + # We should return a response with more information. We kept an + # empty response here to align with the webapp2 implementation. + # A possible option: + # return request_handler.RequestHandlerReportError( + # 'Datastore request failed: %s.' % str(e), status=400) + return make_response('') + + ndb.Future.wait_all(all_put_futures) + + client = sheriff_config_client.GetSheriffConfigClient() + tests_keys = set() + for t in parent_tests: + reason = [] + subscriptions, _ = client.Match(t.test_path, check=True) + if not subscriptions: + reason.append('subscriptions') + if not t.has_rows: + reason.append('has_rows') + if IsRefBuild(t.key): + reason.append('RefBuild') + if reason: + # Disable this log since it's killing the quota of Cloud Logging API - + # write requests per minute + # logging.info('Skip test: %s reason=%s', t.key, ','.join(reason)) + continue + logging.info('Process test: %s', t.key) + tests_keys.add(t.key) + + # Updating of the cached graph revisions should happen after put because + # it requires the new row to have a timestamp, which happens upon put. 
+ futures = [ + graph_revisions.AddRowsToCacheAsync(added_rows), + find_anomalies.ProcessTestsAsync(tests_keys) + ] + ndb.Future.wait_all(futures) + return make_response('') def _PrewarmGets(data): diff --git a/dashboard/dashboard/add_point_queue_test.py b/dashboard/dashboard/add_point_queue_test.py index 78318055614..64132fb3eed 100644 --- a/dashboard/dashboard/add_point_queue_test.py +++ b/dashboard/dashboard/add_point_queue_test.py @@ -17,7 +17,7 @@ class GetOrCreateAncestorsTest(testing_common.TestCase): def setUp(self): - super(GetOrCreateAncestorsTest, self).setUp() + super().setUp() self.SetCurrentUser('foo@bar.com', is_admin=True) def testGetOrCreateAncestors_GetsExistingEntities(self): @@ -79,6 +79,73 @@ def testGetOrCreateAncestors_RespectsImprovementDirectionForNewTest(self): 'M', 'b', 'suite/foo', units='bogus', improvement_direction=anomaly.UP) self.assertEqual(anomaly.UP, test.improvement_direction) + def testGetOrCreateAncestors_UpdatesAllExpectedEntities(self): + # pylint: disable=line-too-long + t = graph_data.TestMetadata( + id='WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests', + internal_only=True) + t.UpdateSheriff() + t.put() + + t = graph_data.TestMetadata( + id='WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests/render_frame_rate_fps', + internal_only=True) + t.UpdateSheriff() + t.put() + + master_key = graph_data.Master(id='WebRTCPerf', parent=None).put() + graph_data.Bot( + id='android32-pixel5-android11', parent=master_key, + internal_only=False).put() + + test_path = 'WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests/render_frame_rate_fps/foreman_cif_delay_50_0_plr_5_flexfec' + test_path_parts = test_path.split('/') + master = test_path_parts[0] + bot = test_path_parts[1] + full_test_name = '/'.join(test_path_parts[2:]) + internal_only = graph_data.Bot.GetInternalOnlySync(master, bot) + + parent = add_point_queue.GetOrCreateAncestors( + master, bot, full_test_name, internal_only=internal_only) + self.assertEqual( + 'WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests/render_frame_rate_fps/foreman_cif_delay_50_0_plr_5_flexfec', + parent.key.id()) + # Check that all the Bot and TestMetadata entities were correctly added. 
+ created_masters = graph_data.Master.query().fetch() + created_bots = graph_data.Bot.query().fetch() + created_tests = graph_data.TestMetadata.query().fetch() + self.assertEqual(1, len(created_masters)) + self.assertEqual(1, len(created_bots)) + self.assertEqual(3, len(created_tests)) + + self.assertEqual('WebRTCPerf', created_masters[0].key.id()) + self.assertIsNone(created_masters[0].key.parent()) + + self.assertEqual('android32-pixel5-android11', created_bots[0].key.id()) + self.assertEqual('WebRTCPerf', created_bots[0].key.parent().id()) + self.assertFalse(created_bots[0].internal_only) + + self.assertEqual('WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests', + created_tests[0].key.id()) + self.assertIsNone(created_tests[0].parent_test) + self.assertEqual('android32-pixel5-android11', created_tests[0].bot_name) + self.assertFalse(created_tests[0].internal_only) + + self.assertEqual('render_frame_rate_fps', created_tests[1].test_part1_name) + self.assertEqual('WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests', + created_tests[1].parent_test.id()) + self.assertIsNone(created_tests[1].bot) + self.assertFalse(created_tests[1].internal_only) + + self.assertEqual( + 'WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests/render_frame_rate_fps/foreman_cif_delay_50_0_plr_5_flexfec', + created_tests[2].key.id()) + self.assertEqual( + 'WebRTCPerf/android32-pixel5-android11/webrtc_perf_tests/render_frame_rate_fps', + created_tests[2].parent_test.id()) + self.assertIsNone(created_tests[2].bot) + self.assertFalse(created_tests[2].internal_only) + # pylint: enable=line-too-long if __name__ == '__main__': unittest.main() diff --git a/dashboard/dashboard/add_point_test.py b/dashboard/dashboard/add_point_test.py index c5987b20d42..41b216ae948 100644 --- a/dashboard/dashboard/add_point_test.py +++ b/dashboard/dashboard/add_point_test.py @@ -7,12 +7,13 @@ from __future__ import absolute_import import copy +from flask import Flask import json import math import unittest import mock -import webapp2 +import six import webtest from google.appengine.api import datastore_errors @@ -173,6 +174,19 @@ } +flask_app = Flask(__name__) + + +@flask_app.route('/add_point', methods=['POST']) +def AddPointPost(): + return add_point.AddPointPost() + + +@flask_app.route('/add_point_queue', methods=['GET', 'POST']) +def AddPointQueuePost(): + return add_point_queue.AddPointQueuePost() + + #TODO(fancl): mocking Match to return some actuall result @mock.patch.object(SheriffConfigClient, '__init__', mock.MagicMock(return_value=None)) @@ -181,11 +195,8 @@ class AddPointTest(testing_common.TestCase): def setUp(self): - super(AddPointTest, self).setUp() - app = webapp2.WSGIApplication([('/add_point', add_point.AddPointHandler), - ('/add_point_queue', - add_point_queue.AddPointQueueHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) units_to_direction.UpdateFromJson(_UNITS_TO_DIRECTION_DICT) self.SetCurrentUser( 'foo-service-account@testing.gserviceaccount.com', is_admin=True) @@ -328,7 +339,7 @@ def testPost_TestNameEndsWithUnderscoreRef_ProcessTestIsNotCalled( point['test'] = '1234/abcd_ref' self.testapp.post('/add_point', {'data': json.dumps([point])}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) - mock_process_test.assert_called_once_with([]) + mock_process_test.assert_called_once_with(set()) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync') def testPost_TestNameEndsWithSlashRef_ProcessTestIsNotCalled( 
@@ -338,7 +349,7 @@ def testPost_TestNameEndsWithSlashRef_ProcessTestIsNotCalled( point['test'] = '1234/ref' self.testapp.post('/add_point', {'data': json.dumps([point])}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) - mock_process_test.assert_called_once_with([]) + mock_process_test.assert_called_once_with(set()) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTestsAsync') def testPost_TestNameEndsContainsButDoesntEndWithRef_ProcessTestIsCalled( @@ -399,42 +410,42 @@ def testPost_BenchmarkName_Slash_DataRejected(self): point['test_suite_name'] = 'no/slashes' response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Illegal slash in test_suite_name', response.body) + self.assertIn(b'Illegal slash in test_suite_name', response.body) def testPost_BenchmarkName_NotString_DataRejected(self): point = copy.deepcopy(_SAMPLE_DASHBOARD_JSON) point['test_suite_name'] = ['name'] response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Error: test_suite_name must be a string', response.body) + self.assertIn(b'Error: test_suite_name must be a string', response.body) def testPost_BotName_Slash_DataRejected(self): point = copy.deepcopy(_SAMPLE_DASHBOARD_JSON) point['bot'] = 'no/slashes' response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Illegal slash in bot', response.body) + self.assertIn(b'Illegal slash in bot', response.body) def testPost_BotName_NotString_DataRejected(self): point = copy.deepcopy(_SAMPLE_DASHBOARD_JSON) point['bot'] = ['name'] response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Error: bot must be a string', response.body) + self.assertIn(b'Error: bot must be a string', response.body) def testPost_MasterName_Slash_DataRejected(self): point = copy.deepcopy(_SAMPLE_DASHBOARD_JSON) point['master'] = 'no/slashes' response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Illegal slash in master', response.body) + self.assertIn(b'Illegal slash in master', response.body) def testPost_MasterName_NotString_DataRejected(self): point = copy.deepcopy(_SAMPLE_DASHBOARD_JSON) point['master'] = ['name'] response = self.testapp.post( '/add_point', {'data': json.dumps(point)}, status=400) - self.assertIn('Error: master must be a string', response.body) + self.assertIn(b'Error: master must be a string', response.body) def testPost_TestNameHasDoubleUnderscores_Rejected(self): point = copy.deepcopy(_SAMPLE_POINT) @@ -476,7 +487,7 @@ def testPost_InvalidRevision_Rejected(self): point['revision'] = 'I am not a valid revision number!' 
response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400) - self.assertIn('Bad value for "revision", should be numerical.\n', + self.assertIn(b'Bad value for "revision", should be numerical.\n', response.body) def testPost_InvalidZeroRevision_Rejected(self): @@ -484,7 +495,7 @@ def testPost_InvalidZeroRevision_Rejected(self): point['revision'] = '0' response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400) - self.assertIn('must not be <= 0', response.body) + self.assertIn(b'must not be <= 0', response.body) def testPost_InvalidSupplementalRevision_DropsRevision(self): point = copy.deepcopy(_SAMPLE_POINT) @@ -834,7 +845,7 @@ def testPost_NoValue_Rejected(self): del point['value'] response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400) - self.assertIn('No "value" given.\n', response.body) + self.assertIn(b'No "value" given.\n', response.body) self.assertIsNone(graph_data.Row.query().get()) def testPost_WithBadValue_Rejected(self): @@ -844,7 +855,7 @@ def testPost_WithBadValue_Rejected(self): response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) - self.assertIn('Bad value for "value", should be numerical.\n', + self.assertIn(b'Bad value for "value", should be numerical.\n', response.body) self.assertIsNone(graph_data.Row.query().get()) @@ -895,7 +906,7 @@ def testPost_LongSupplementalColumnName_ColumnDropped(self): row = graph_data.Row.query().get() self.assertFalse(hasattr(row, key)) - def testPost_LongSupplementalAnnotation_ColumnDropped(self): + def testPost_LongSupplementalAnnotation_ColumnTruncated(self): point = copy.deepcopy(_SAMPLE_POINT) point['supplemental_columns'] = { 'a_one': 'z' * (add_point._STRING_COLUMN_MAX_LENGTH + 1), @@ -905,7 +916,8 @@ def testPost_LongSupplementalAnnotation_ColumnDropped(self): self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Row properties with names that are too long are not added. row = graph_data.Row.query().get() - self.assertFalse(hasattr(row, 'a_one')) + self.assertTrue(hasattr(row, 'a_one')) + self.assertTrue(len(row.a_one), add_point._STRING_COLUMN_MAX_LENGTH) self.assertEqual('hello', row.a_two) def testPost_BadSupplementalDataColumn_ColumnDropped(self): @@ -922,6 +934,14 @@ def testPost_BadSupplementalDataColumn_ColumnDropped(self): self.assertFalse(hasattr(row, 'd_run_1')) self.assertEqual(42.5, row.d_run_2) + # crbug/1403845 + # When running in flask, the second call on /add_point is routed to the + # handler of /add_point_queue. Haven't figured out the reason yet. Will + # disable the test on RevisionTooLow_Rejected and RevisionTooHigh_Rejected + # for now. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') def testPost_RevisionTooLow_Rejected(self): # If a point's ID is much lower than the last one, it should be rejected # because this indicates that the revision type was accidentally changed. 
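An illustrative aside (not part of the patch) on the b'...' literals introduced in the assertions above: under Python 3, webtest's response.body is bytes, so substring checks need bytes literals, while decoding (the equivalent of response.text) keeps str comparisons working. A minimal self-contained example, with placeholder strings:

import unittest


class BytesBodyExample(unittest.TestCase):

  def testBytesVersusText(self):
    body = b'Error: bot must be a string'  # what response.body yields in py3
    text = body.decode('utf-8')            # equivalent of response.text
    self.assertIn(b'must be a string', body)
    self.assertIn('must be a string', text)


if __name__ == '__main__':
  unittest.main()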
@@ -933,20 +953,22 @@ def testPost_RevisionTooLow_Rejected(self): test_path = 'ChromiumPerf/win7/my_test_suite/my_test' last_added_revision = ndb.Key('LastAddedRevision', test_path).get() self.assertEqual(1408479179, last_added_revision.revision) - point = copy.deepcopy(_SAMPLE_POINT) point['revision'] = 285000 self.testapp.post('/add_point', {'data': json.dumps([point])}, status=400) rows = graph_data.Row.query().fetch() self.assertEqual(1, len(rows)) + # (crbug/1403845): Routing is broken after ExecuteTaskQueueTasks is called. + @unittest.skipIf(six.PY3, ''' + http requests after ExecuteTaskQueueTasks are not routed correctly for py3. + ''') def testPost_RevisionTooHigh_Rejected(self): # First add one point; it's accepted because it's the first in the series. point = copy.deepcopy(_SAMPLE_POINT) point['revision'] = 285000 self.testapp.post('/add_point', {'data': json.dumps([point])}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) - point = copy.deepcopy(_SAMPLE_POINT) point['revision'] = 1408479179 self.testapp.post('/add_point', {'data': json.dumps([point])}, status=400) @@ -1104,6 +1126,15 @@ def _SampleTrace(): 'value': 42, } + @staticmethod + def _SampleTrace_Invalid_Key(): + return { + 'name': 'bar.baz', + 'unit': 'meters', #The key should be 'units' + 'type': 'scalar', + 'value': 42, + } + def testFlattenTrace_PreservesUnits(self): """Tests that _FlattenTrace preserves the units property.""" trace = self._SampleTrace() @@ -1310,6 +1341,13 @@ def testFlattenTrace_FlattensGroupingLabelToFivePartName(self): row = add_point._FlattenTrace('foo', 'baz@@bar', 'https://abc.xyz/', trace) self.assertEqual(row['test'], 'foo/bar/baz/https___abc.xyz_') + def testFlattenTrace_InvalidUnitsKey(self): + """Tests that a BadRequestError is thrown if the 'units' key is + not provided in the data.""" + trace = self._SampleTrace_Invalid_Key() + with self.assertRaises(add_point.BadRequestError): + add_point._FlattenTrace('foo', 'bar', 'summary', trace) + if __name__ == '__main__': unittest.main() diff --git a/dashboard/dashboard/alert_groups.py b/dashboard/dashboard/alert_groups.py index 1517291acff..1d13c14e99d 100644 --- a/dashboard/dashboard/alert_groups.py +++ b/dashboard/dashboard/alert_groups.py @@ -8,22 +8,51 @@ import logging -from dashboard.common import request_handler +from flask import make_response, request +from collections import Counter + +from dashboard.common import cloud_metric from dashboard.models import alert_group from dashboard.models import alert_group_workflow +from dashboard.services import perf_issue_service_client from google.appengine.ext import deferred from google.appengine.ext import ndb from google.appengine.api import taskqueue +DEFAULT_UNGROUPED_GROUP_NAME = 'Ungrouped' +SKIA_UNGROUPED_GROUP_NAME = 'Ungrouped_Skia' + +UNGROUPED_GROUP_MAPPING = { + alert_group.AlertGroup.Type.test_suite: DEFAULT_UNGROUPED_GROUP_NAME, + alert_group.AlertGroup.Type.test_suite_skia: SKIA_UNGROUPED_GROUP_NAME +} + + +def _GetUngroupedGroupName(group_type: int): + group_name = UNGROUPED_GROUP_MAPPING.get(group_type, None) + if not group_name: + logging.warning('Unsupported group type: %s', group_type) + + return group_name + + def _ProcessAlertGroup(group_key): workflow = alert_group_workflow.AlertGroupWorkflow(group_key.get()) logging.info('Processing group: %s', group_key.string_id()) workflow.Process() -def _ProcessUngroupedAlerts(): - groups = alert_group.AlertGroup.GetAll() +def _ProcessUngroupedAlerts(group_type: int): + ''' Process alerts which need a new 
group + ''' + # Parity + try: + parity_results = perf_issue_service_client.PostUngroupedAlerts(group_type) + except Exception as e: # pylint: disable=broad-except + logging.warning('Parity failed in calling PostUngroupedAlerts. %s', str(e)) + + groups = alert_group.AlertGroup.GetAll(group_type=group_type) # TODO(fancl): This is an inefficient algorithm, as it's linear to the number # of groups. We should instead create an interval tree so that it's @@ -37,14 +66,40 @@ def FindGroup(group): logging.info('Processing un-grouped alerts.') reserved = alert_group.AlertGroup.Type.reserved - ungrouped_list = alert_group.AlertGroup.Get('Ungrouped', reserved) + ungrouped_group_name = _GetUngroupedGroupName(group_type) + if not ungrouped_group_name: + return + + ungrouped_list = alert_group.AlertGroup.Get(ungrouped_group_name, reserved) if not ungrouped_list: - alert_group.AlertGroup(name='Ungrouped', group_type=reserved, - active=True).put() + alert_group.AlertGroup( + name=ungrouped_group_name, group_type=reserved, active=True).put() + logging.info('Created a new ungrouped alert group with name %s', + ungrouped_group_name) return + ungrouped = ungrouped_list[0] ungrouped_anomalies = ndb.get_multi(ungrouped.anomalies) + logging.info('%i anomalies found in %s group: %s', len(ungrouped_anomalies), + ungrouped_group_name, ungrouped.anomalies) + + # Parity on anomaly counts under ungrouped + try: + ungrouped_anomaly_keys = [ + str(a.key.id()) for a in ungrouped_anomalies + ] + new_ungrouped_anomaly_keys = [str(k) for k in list(parity_results.keys())] + if sorted(ungrouped_anomaly_keys) != sorted(new_ungrouped_anomaly_keys): + logging.warning( + 'Imparity found for PostUngroupedAlerts - anomaly count. %s, %s', + ungrouped_anomaly_keys, new_ungrouped_anomaly_keys) + cloud_metric.PublishPerfIssueServiceGroupingImpariry( + 'PostUngroupedAlerts') + except Exception as e: # pylint: disable=broad-except + logging.warning('Parity failed in PostUngroupedAlerts - anomaly count. %s', + str(e)) + # Scan all ungrouped anomalies and create missing groups. This doesn't # mean their groups are not created so we still need to check if group # has been created. There are two cases: @@ -53,34 +108,96 @@ def FindGroup(group): # 2. Groups may be created during the iteration. # Newly created groups won't be updated until next iteration. for anomaly_entity in ungrouped_anomalies: - anomaly_entity.groups = [ - FindGroup(g) or g.put() for g in - alert_group.AlertGroup.GenerateAllGroupsForAnomaly(anomaly_entity) - ] - logging.info('Persisting anomalies') - ndb.put_multi(ungrouped_anomalies) - - -def ProcessAlertGroups(): - logging.info('Fetching alert groups.') - groups = alert_group.AlertGroup.GetAll() + new_count = 0 + alert_groups = [] + all_groups = alert_group.AlertGroup.GenerateAllGroupsForAnomaly( + anomaly_entity) + for g in all_groups: + found_group = FindGroup(g) + if found_group: + alert_groups.append(found_group) + else: + new_group = g.key + alert_groups.append(new_group) + new_count += 1 + anomaly_entity.groups = alert_groups + + # parity on changes on group changes on anomalies + try: + single_parity = parity_results.get(anomaly_entity.key.id(), None) + if single_parity: + existings = single_parity['existing_groups'] + news = single_parity['new_groups'] + if new_count != len(news): + logging.warning( + 'Imparity found for PostUngroupedAlerts - new groups. 
%s, %s', + new_count, len(news)) + cloud_metric.PublishPerfIssueServiceGroupingImpariry( + 'PostUngroupedAlerts - new groups') + group_keys = [group.key.string_id() for group in groups] + for g in existings: + if g not in group_keys: + logging.warning( + 'Imparity found for PostUngroupedAlerts - old groups. %s, %s', + existings, group_keys) + cloud_metric.PublishPerfIssueServiceGroupingImpariry( + 'PostUngroupedAlerts - old groups') + except Exception as e: # pylint: disable=broad-except + logging.warning( + 'Parity failed in PostUngroupedAlerts - group match on %s. %s', + anomaly_entity.key, str(e)) + + +def ProcessAlertGroups(group_type: int): + logging.info('Fetching alert groups of type %i.', group_type) + groups = alert_group.AlertGroup.GetAll(group_type=group_type) + ungrouped_group = alert_group.AlertGroup.Get( + group_name=UNGROUPED_GROUP_MAPPING[group_type], + group_type=alert_group.AlertGroup.Type.reserved) + if len(ungrouped_group) > 0: + groups.append(ungrouped_group[0]) logging.info('Found %s alert groups.', len(groups)) - for group in groups: + # Parity on get all + try: + group_keys = perf_issue_service_client.GetAllActiveAlertGroups(group_type) + logging.info('Parity found %s alert groups.', len(group_keys)) + original_group_keys = [str(g.key.id()) for g in groups] + parity_keys = list(map(str, group_keys)) + new_groups = ndb.get_multi([ndb.Key('AlertGroup', k) for k in group_keys]) + if sorted(parity_keys) != sorted(original_group_keys): + logging.warning('Imparity found for GetAllActiveAlertGroups. %s, %s', + group_keys, original_group_keys) + cloud_metric.PublishPerfIssueServiceGroupingImpariry( + 'GetAllActiveAlertGroups') + except Exception as e: # pylint: disable=broad-except + logging.warning('Parity logic failed in GetAllActiveAlertGroups. %s', + str(e)) + + new_groups_keys = map(lambda g: g.key, new_groups) + unique_groups_keys = list(set(new_groups_keys)) + duplicate_keys = list(Counter(new_groups_keys) - Counter(unique_groups_keys)) + if duplicate_keys: + logging.warning('Found duplicate alert groups keys: %s', + [key.id() for key in duplicate_keys]) + + for group_key in unique_groups_keys: deferred.defer( _ProcessAlertGroup, - group.key, + group_key, _queue='update-alert-group-queue', _retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0), ) deferred.defer( _ProcessUngroupedAlerts, + group_type, _queue='update-alert-group-queue', _retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0), ) -class AlertGroupsHandler(request_handler.RequestHandler): +@cloud_metric.APIMetric("chromeperf", "/alert_groups_update") +def AlertGroupsGet(): """Create and Update AlertGroups. All active groups are fetched and updated in every iteration. Auto-Triage @@ -95,12 +212,16 @@ class AlertGroupsHandler(request_handler.RequestHandler): - Untriaged: Only improvements in the group or auto-triage not enabled. - Closed: Issue closed. """ - def get(self): - logging.info('Queueing task for deferred processing.') - # Do not retry failed tasks. - deferred.defer( - ProcessAlertGroups, - _queue='update-alert-group-queue', - _retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0), - ) - self.response.write('OK') + logging.info('Queueing task for deferred processing.') + + group_type = request.args.get('group_type', + alert_group.AlertGroup.Type.test_suite) + + # Do not retry failed tasks. 
+ deferred.defer( + ProcessAlertGroups, + int(group_type), + _queue='update-alert-group-queue', + _retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0), + ) + return make_response('OK') diff --git a/dashboard/dashboard/alert_groups_test.py b/dashboard/dashboard/alert_groups_test.py index bafdcaf2154..cfdedb1bdde 100644 --- a/dashboard/dashboard/alert_groups_test.py +++ b/dashboard/dashboard/alert_groups_test.py @@ -1,6 +1,7 @@ # Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +# pylint: disable=too-many-lines from __future__ import print_function from __future__ import division @@ -8,14 +9,17 @@ import mock import datetime - +from flask import Flask import logging import unittest -import webapp2 import webtest +from google.appengine.ext import ndb + from dashboard import alert_groups from dashboard import sheriff_config_client +from dashboard.common import namespaced_stored_object +from dashboard.common import sandwich_allowlist from dashboard.common import testing_common from dashboard.common import utils from dashboard.models import alert_group @@ -25,15 +29,23 @@ from dashboard.models import subscription from dashboard.services import crrev_service from dashboard.services import pinpoint_service +from dashboard.services import workflow_service _SERVICE_ACCOUNT_EMAIL = 'service-account@chromium.org' +flask_app = Flask(__name__) + + +@flask_app.route('/alert_groups_update') +def AlertGroupsGet(): + return alert_groups.AlertGroupsGet() + @mock.patch.object(utils, 'ServiceAccountEmail', lambda: _SERVICE_ACCOUNT_EMAIL) class GroupReportTestBase(testing_common.TestCase): def __init__(self, *args, **kwargs): - super(GroupReportTestBase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fake_issue_tracker = testing_common.FakeIssueTrackerService() self.fake_issue_tracker.comments.append({ 'id': 1, @@ -47,29 +59,146 @@ def __init__(self, *args, **kwargs): infos={}, revisions={}) def setUp(self): - super(GroupReportTestBase, self).setUp() + super().setUp() self.maxDiff = None - app = webapp2.WSGIApplication([('/alert_groups_update', - alert_groups.AlertGroupsHandler)]) - self.testapp = webtest.TestApp(app) + self.testapp = webtest.TestApp(flask_app) + namespaced_stored_object.Set('repositories', { + 'chromium': { + 'repository_url': 'git://chromium' + }, + }) def _CallHandler(self): result = self.testapp.get('/alert_groups_update') self.ExecuteDeferredTasks('update-alert-group-queue') return result + def _FindDuplicateGroupsMock(self, key_string): + key = ndb.Key('AlertGroup', key_string) + query = alert_group.AlertGroup.query( + alert_group.AlertGroup.active == True, + alert_group.AlertGroup.canonical_group == key) + duplicated_groups = query.fetch() + duplicated_keys = [g.key.string_id() for g in duplicated_groups] + return duplicated_keys + + def _FindCanonicalGroupMock(self, key_string, merged_into, + merged_issue_project): + key = ndb.Key('AlertGroup', key_string) + query = alert_group.AlertGroup.query( + alert_group.AlertGroup.active == True, + alert_group.AlertGroup.bug.project == merged_issue_project, + alert_group.AlertGroup.bug.bug_id == merged_into) + query_result = query.fetch(limit=1) + if not query_result: + return None + + canonical_group = query_result[0] + visited = set() + while canonical_group.canonical_group: + visited.add(canonical_group.key) + next_group_key = canonical_group.canonical_group + # Visited check is just precaution. 
+ # If it is true - the system previously failed to prevent loop creation. + if next_group_key == key or next_group_key in visited: + return None + canonical_group = next_group_key.get() + return {'key': canonical_group.key.string_id()} + + def _GetAllActiveAlertGroupsMock(self, group_type: int): + all_groups = alert_group.AlertGroup.GetAll(group_type=group_type) + all_keys = [g.key.id() for g in all_groups] + ungrouped_group = alert_group.AlertGroup.Get( + group_name='Ungrouped', + group_type=alert_group.AlertGroup.Type.reserved) + if len(ungrouped_group) > 0: + all_keys.append(ungrouped_group[0].key.id()) + return all_keys + + def _PostUngroupedAlertsMock(self, group_type): + groups = alert_group.AlertGroup.GetAll(group_type=group_type) + + def FindGroup(group): + for g in groups: + if group.IsOverlapping(g): + return g.key + groups.append(group) + return None + + reserved = alert_group.AlertGroup.Type.reserved + ungrouped_list = alert_group.AlertGroup.Get('Ungrouped', reserved) + if not ungrouped_list: + alert_group.AlertGroup( + name='Ungrouped', group_type=reserved, active=True).put() + return {} + ungrouped = ungrouped_list[0] + ungrouped_anomalies = ndb.get_multi(ungrouped.anomalies) + + parity_results = {} + for anomaly_entity in ungrouped_anomalies: + new_count = 0 + new_alert_groups = [] + all_groups = alert_group.AlertGroup.GenerateAllGroupsForAnomaly( + anomaly_entity) + for g in all_groups: + found_group = FindGroup(g) + if found_group: + new_alert_groups.append(found_group) + else: + new_group = g.put() + new_alert_groups.append(new_group) + new_count += 1 + anomaly_entity.groups = new_alert_groups + parity_results[anomaly_entity.key.id()] = { + 'new_groups': [g.id() for g in new_alert_groups], + 'existing_groups': [] + } + logging.info('Persisting %i anomalies', len(ungrouped_anomalies)) + ndb.put_multi(ungrouped_anomalies) + return parity_results + + def _PatchPerfIssueService(self, function_name, mock_function): + perf_issue_post_patcher = mock.patch(function_name, mock_function) + perf_issue_post_patcher.start() + self.addCleanup(perf_issue_post_patcher.stop) + def _SetUpMocks(self, mock_get_sheriff_client): sheriff = subscription.Subscription(name='sheriff', auto_triage_enable=True) mock_get_sheriff_client().Match.return_value = ([sheriff], None) - self.PatchObject(alert_group_workflow, '_IssueTracker', - lambda: self.fake_issue_tracker) self.PatchObject(crrev_service, 'GetNumbering', lambda *args, **kargs: {'git_sha': 'abcd'}) + new_job = mock.MagicMock(return_value={'jobId': '123456'}) self.PatchObject(pinpoint_service, 'NewJob', new_job) self.PatchObject(alert_group_workflow, 'revision_info_client', self.fake_revision_info) + self.PatchObject(alert_group, 'NONOVERLAP_THRESHOLD', 100) + + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.GetIssue', + self.fake_issue_tracker.GetIssue) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.GetIssueComments', + self.fake_issue_tracker.GetIssueComments) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.PostIssue', + self.fake_issue_tracker.NewBug) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.PostIssueComment', + self.fake_issue_tracker.AddBugComment) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.GetDuplicateGroupKeys', + self._FindDuplicateGroupsMock) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.GetCanonicalGroupByIssue', + 
self._FindCanonicalGroupMock) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.GetAllActiveAlertGroups', + self._GetAllActiveAlertGroupsMock) + self._PatchPerfIssueService( + 'dashboard.services.perf_issue_service_client.PostUngroupedAlerts', + self._PostUngroupedAlertsMock) def _AddAnomaly(self, **kargs): default = { @@ -93,20 +222,25 @@ def _AddAnomaly(self, **kargs): a = anomaly.Anomaly(**default) clt = sheriff_config_client.GetSheriffConfigClient() subscriptions, _ = clt.Match(a) - a.groups = alert_group.AlertGroup.GetGroupsForAnomaly(a, subscriptions) + groups_for_anomaly = alert_group.AlertGroup.GetGroupsForAnomaly( + a, subscriptions) + a.groups = groups_for_anomaly return a.put() @mock.patch.object(utils, 'ServiceAccountEmail', lambda: _SERVICE_ACCOUNT_EMAIL) @mock.patch('dashboard.sheriff_config_client.GetSheriffConfigClient') +@mock.patch.object(sandwich_allowlist, 'CheckAllowlist', + testing_common.CheckSandwichAllowlist) class GroupReportTest(GroupReportTestBase): def testNoGroup(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) # Put an anomaly before Ungrouped is created self._AddAnomaly() - def testCreatingUngrouped(self, _): + def testCreatingUngrouped(self, mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) self.assertIs( len( alert_group.AlertGroup.Get( @@ -154,8 +288,8 @@ def testMultipleAltertsGroupingDifferentDomain_BeforeGroupCreated( for g in alert_group.AlertGroup.Get( 'test_suite', alert_group.AlertGroup.Type.test_suite) } - self.assertItemsEqual(groups['master'].anomalies, [a1, a2, a4]) - self.assertItemsEqual(groups['other'].anomalies, [a3]) + self.assertCountEqual(groups['master'].anomalies, [a1, a2, a4]) + self.assertCountEqual(groups['other'].anomalies, [a3]) def testMultipleAltertsGroupingDifferentDomain_AfterGroupCreated( self, mock_get_sheriff_client): @@ -181,8 +315,8 @@ def testMultipleAltertsGroupingDifferentDomain_AfterGroupCreated( for g in alert_group.AlertGroup.Get( 'test_suite', alert_group.AlertGroup.Type.test_suite) } - self.assertItemsEqual(groups['master'].anomalies, [a1, a2, a4]) - self.assertItemsEqual(groups['other'].anomalies, [a3]) + self.assertCountEqual(groups['master'].anomalies, [a1, a2, a4]) + self.assertCountEqual(groups['other'].anomalies, [a3]) def testMultipleAltertsGroupingDifferentBot(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) @@ -201,7 +335,7 @@ def testMultipleAltertsGroupingDifferentBot(self, mock_get_sheriff_client): 'test_suite', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.anomalies, [a1, a2, a3, a4]) + self.assertCountEqual(group.anomalies, [a1, a2, a3, a4]) def testMultipleAltertsGroupingDifferentSuite(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) @@ -220,12 +354,12 @@ def testMultipleAltertsGroupingDifferentSuite(self, mock_get_sheriff_client): 'test_suite', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.anomalies, [a1, a2, a4]) + self.assertCountEqual(group.anomalies, [a1, a2, a4]) group = alert_group.AlertGroup.Get( 'other', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.anomalies, [a3]) + self.assertCountEqual(group.anomalies, [a3]) def testMultipleAltertsGroupingOverrideSuite(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) @@ -238,11 +372,11 @@ def testMultipleAltertsGroupingOverrideSuite(self, mock_get_sheriff_client): start_revision=50, end_revision=150, ) - a3 = 
self._AddAnomaly( + self._AddAnomaly( test='master/bot/other/measurement/test_case', alert_grouping=['test_suite', 'test_suite_other1'], ) - a4 = self._AddAnomaly( + self._AddAnomaly( test='master/bot/test_suite/measurement/test_case', median_before_anomaly=0, alert_grouping=['test_suite_other1', 'test_suite_other2'], @@ -255,22 +389,7 @@ def testMultipleAltertsGroupingOverrideSuite(self, mock_get_sheriff_client): 'test_suite', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.anomalies, [a1, a2]) - group = alert_group.AlertGroup.Get( - 'test_suite', - alert_group.AlertGroup.Type.logical, - )[0] - self.assertItemsEqual(group.anomalies, [a3]) - group = alert_group.AlertGroup.Get( - 'test_suite_other1', - alert_group.AlertGroup.Type.logical, - )[0] - self.assertItemsEqual(group.anomalies, [a3, a4]) - group = alert_group.AlertGroup.Get( - 'test_suite_other2', - alert_group.AlertGroup.Type.logical, - )[0] - self.assertItemsEqual(group.anomalies, [a4]) + self.assertCountEqual(group.anomalies, [a1, a2]) def testMultipleAltertsGroupingMultipleSheriff(self, mock_get_sheriff_client): @@ -320,10 +439,11 @@ def testMultipleAltertsGroupingMultipleSheriff(self, for g in alert_group.AlertGroup.Get( 'test_suite', alert_group.AlertGroup.Type.test_suite) } - self.assertItemsEqual(groups.keys(), ['sheriff1', 'sheriff2', 'sheriff3']) - self.assertItemsEqual(groups['sheriff1'].anomalies, [a1, a2]) - self.assertItemsEqual(groups['sheriff2'].anomalies, [a1, a3]) - self.assertItemsEqual(groups['sheriff3'].anomalies, [a3]) + self.assertCountEqual( + list(groups.keys()), ['sheriff1', 'sheriff2', 'sheriff3']) + self.assertCountEqual(groups['sheriff1'].anomalies, [a1, a2]) + self.assertCountEqual(groups['sheriff2'].anomalies, [a1, a3]) + self.assertCountEqual(groups['sheriff3'].anomalies, [a3]) def testMultipleAltertsGroupingPointRange(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) @@ -340,7 +460,7 @@ def testMultipleAltertsGroupingPointRange(self, mock_get_sheriff_client): 'test_suite', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.anomalies, [a1, a2]) + self.assertCountEqual(group.anomalies, [a1, a2]) def testArchiveAltertsGroup(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) @@ -397,8 +517,15 @@ def testArchiveAltertsGroupIssueClosed(self, mock_get_sheriff_client): )[0] self.assertEqual(group.name, 'test_suite') - def testTriageAltertsGroup(self, mock_get_sheriff_client): + @mock.patch('dashboard.common.utils.ShouldDelayIssueReporting', + mock.MagicMock(return_value=False)) + @unittest.expectedFailure + def testTriageAltertsGroup_Sandwiched(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) + ], None) self._CallHandler() # Add anomalies a = self._AddAnomaly() @@ -420,15 +547,106 @@ def testTriageAltertsGroup(self, mock_get_sheriff_client): alert_group.AlertGroup.Type.test_suite, )[0] self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['components'], + self.assertEqual(self.fake_issue_tracker.new_bug_kwargs['components'], []) + self.assertEqual( + sorted(self.fake_issue_tracker.new_bug_kwargs['labels']), + sorted([ + 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', + 'Chromeperf-Auto-Triaged' + ])) + 
self.assertRegex(self.fake_issue_tracker.new_bug_kwargs['description'], + r'Top 1 affected measurements in bot:') + self.assertEqual(a.get().bug_id, 12345) + self.assertEqual(group.bug.bug_id, 12345) + # Make sure we don't file the issue again for this alert group. + self.fake_issue_tracker.new_bug_args = None + self.fake_issue_tracker.new_bug_kwargs = None + self._CallHandler() + self.assertIsNone(self.fake_issue_tracker.new_bug_args) + self.assertIsNone(self.fake_issue_tracker.new_bug_kwargs) + + @mock.patch('dashboard.common.utils.ShouldDelayIssueReporting', + mock.MagicMock(return_value=True)) + @unittest.expectedFailure + def testTriageAltertsGroup_Sandwiched_DelayReport(self, + mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) + ], None) + self._CallHandler() + # Add anomalies + a = self._AddAnomaly() + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) + self.assertEqual(self.fake_issue_tracker.new_bug_kwargs['components'], + ['Speed>Regressions']) + self.assertEqual( + sorted(self.fake_issue_tracker.new_bug_kwargs['labels']), + sorted([ + 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', + 'Chromeperf-Auto-Triaged', 'Chromeperf-Delay-Reporting' + ])) + self.assertRegex(self.fake_issue_tracker.new_bug_kwargs['description'], + r'Top 1 affected measurements in bot:') + self.assertEqual(a.get().bug_id, 12345) + self.assertEqual(group.bug.bug_id, 12345) + # Make sure we don't file the issue again for this alert group. 
+ self.fake_issue_tracker.new_bug_args = None + self.fake_issue_tracker.new_bug_kwargs = None + self._CallHandler() + self.assertIsNone(self.fake_issue_tracker.new_bug_args) + self.assertIsNone(self.fake_issue_tracker.new_bug_kwargs) + + def testTriageAltertsGroup_NotSandwiched(self, mock_get_sheriff_client): + blocked_test_name = 'master/bot/blocked-test_suite/measurement/test_case' + self._SetUpMocks(mock_get_sheriff_client) + self._CallHandler() + # Add anomalies + a = self._AddAnomaly(test=blocked_test_name) + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'blocked-test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + group = alert_group.AlertGroup.Get( + 'blocked-test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) + self.assertCountEqual(self.fake_issue_tracker.new_bug_kwargs['components'], ['Foo>Bar']) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['labels'], [ + self.assertCountEqual(self.fake_issue_tracker.new_bug_kwargs['labels'], [ 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', 'Chromeperf-Auto-Triaged' ]) - logging.debug('Rendered:\n%s', self.fake_issue_tracker.new_bug_args[1]) - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 1 affected measurements in bot:') + self.assertRegex(self.fake_issue_tracker.new_bug_kwargs['description'], + r'Top 1 affected measurements in bot:') self.assertEqual(a.get().bug_id, 12345) self.assertEqual(group.bug.bug_id, 12345) # Make sure we don't file the issue again for this alert group. @@ -466,19 +684,19 @@ def testTriageAltertsGroup_MultipleBenchmarks(self, mock_get_sheriff_client): alert_group.AlertGroup.Type.test_suite, )[0] self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['components'], + self.assertCountEqual(self.fake_issue_tracker.new_bug_kwargs['components'], ['Foo>Bar']) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['labels'], [ + self.assertCountEqual(self.fake_issue_tracker.new_bug_kwargs['labels'], [ 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', 'Chromeperf-Auto-Triaged' ]) logging.debug('Rendered:\n%s', self.fake_issue_tracker.new_bug_args[1]) - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 4 affected measurements in bot:') - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 1 affected in test_suite:') - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 1 affected in other_test_suite:') + self.assertRegex(self.fake_issue_tracker.new_bug_args[1], + r'Top 4 affected measurements in bot:') + self.assertRegex(self.fake_issue_tracker.new_bug_args[1], + r'Top 1 affected in test_suite:') + self.assertRegex(self.fake_issue_tracker.new_bug_args[1], + r'Top 1 affected in other_test_suite:') self.assertEqual(a.get().bug_id, 12345) self.assertEqual(group.bug.bug_id, 12345) # Make sure we don't file the issue again for this alert group. 
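A short note (not part of the patch) on the assertion renames running through these test hunks: assertItemsEqual became assertCountEqual and assertRegexpMatches became assertRegex in the Python 3 unittest API, with unchanged semantics. A minimal example using placeholder data:

import unittest


class Py3AssertionNames(unittest.TestCase):

  def testRenamedAssertions(self):
    # Order-insensitive comparison that still respects element counts.
    self.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])
    # Regex search against a string, formerly assertRegexpMatches.
    self.assertRegex('Top 1 affected measurements in bot:',
                     r'Top \d+ affected measurements')


if __name__ == '__main__':
  unittest.main()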
@@ -488,8 +706,14 @@ def testTriageAltertsGroup_MultipleBenchmarks(self, mock_get_sheriff_client): self.assertIsNone(self.fake_issue_tracker.new_bug_args) self.assertIsNone(self.fake_issue_tracker.new_bug_kwargs) - def testTriageAltertsGroupNoOwners(self, mock_get_sheriff_client): + @mock.patch('dashboard.common.utils.ShouldDelayIssueReporting', + mock.MagicMock(return_value=False)) + def testTriageAltertsGroupNoOwners_Sandwiched(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) + ], None) self._CallHandler() # Add anomalies a = self._AddAnomaly(ownership={ @@ -514,15 +738,99 @@ def testTriageAltertsGroupNoOwners(self, mock_get_sheriff_client): alert_group.AlertGroup.Type.test_suite, )[0] self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['components'], - ['Foo>Bar']) - self.assertItemsEqual(self.fake_issue_tracker.new_bug_kwargs['labels'], [ - 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', - 'Chromeperf-Auto-Triaged' - ]) + self.assertEqual(self.fake_issue_tracker.new_bug_kwargs['components'], []) + self.assertEqual( + sorted(self.fake_issue_tracker.new_bug_kwargs['labels']), + sorted([ + 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', + 'Chromeperf-Auto-Triaged' + ])) + self.assertEqual(a.get().bug_id, 12345) + + @mock.patch('dashboard.common.utils.ShouldDelayIssueReporting', + mock.MagicMock(return_value=True)) + def testTriageAltertsGroupNoOwners_Sandwiched_DelayReport( + self, mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) + ], None) + self._CallHandler() + # Add anomalies + a = self._AddAnomaly(ownership={ + 'component': 'Foo>Bar', + 'emails': None, + }) + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) + self.assertEqual(self.fake_issue_tracker.new_bug_kwargs['components'], + ['Speed>Regressions']) + self.assertEqual( + sorted(self.fake_issue_tracker.new_bug_kwargs['labels']), + sorted([ + 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', + 'Chromeperf-Auto-Triaged', 'Chromeperf-Delay-Reporting' + ])) + self.assertEqual(a.get().bug_id, 12345) + + def testTriageAltertsGroupNoOwners_NotSandwiched(self, + mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + self._CallHandler() + blocked_test_name = 'master/bot/blocked-test_suite/measurement/test_case' + # Add anomalies + a = self._AddAnomaly( + test=blocked_test_name, + ownership={ + 'component': 'Foo>Bar', + 'emails': None, + }) + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'blocked-test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = 
datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + group = alert_group.AlertGroup.Get( + 'blocked-test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + self.assertEqual(group.status, alert_group.AlertGroup.Status.triaged) + self.assertEqual(self.fake_issue_tracker.new_bug_kwargs['components'], + ['Foo>Bar']) + self.assertEqual( + sorted(self.fake_issue_tracker.new_bug_kwargs['labels']), + sorted([ + 'Pri-2', 'Restrict-View-Google', 'Type-Bug-Regression', + 'Chromeperf-Auto-Triaged' + ])) self.assertEqual(a.get().bug_id, 12345) - def testAddAlertsAfterTriage(self, mock_get_sheriff_client): + def testAddAlertsAfterTriage_Sandwiched(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) self._CallHandler() # Add anomalies @@ -546,28 +854,94 @@ def testAddAlertsAfterTriage(self, mock_get_sheriff_client): self._AddAnomaly(), self._AddAnomaly(median_before_anomaly=0), ] + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) + ], None) + self._CallHandler() for a in anomalies: self.assertEqual(a.get().bug_id, 12345) - logging.debug('Rendered:\n%s', self.fake_issue_tracker.add_comment_args[1]) self.assertEqual(self.fake_issue_tracker.add_comment_args[0], 12345) - self.assertItemsEqual( - self.fake_issue_tracker.add_comment_kwargs['components'], ['Foo>Bar']) - self.assertRegexpMatches(self.fake_issue_tracker.add_comment_args[1], - r'Top 2 affected measurements in bot:') + self.assertEqual(self.fake_issue_tracker.add_comment_kwargs['components'], + []) + + def testAddAlertsAfterTriage_NotSandwiched(self, mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + self._CallHandler() + blocked_test_name = 'master/blocked-bot/blocked-test_suite/measurement/test_case' + # Add anomalies + a = self._AddAnomaly(test=blocked_test_name) + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'blocked-test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + + # Add anomalies + anomalies = [ + self._AddAnomaly(test=blocked_test_name), + self._AddAnomaly(test=blocked_test_name, median_before_anomaly=0), + ] + self._CallHandler() + for a in anomalies: + self.assertEqual(a.get().bug_id, 12345) + self.assertEqual(self.fake_issue_tracker.add_comment_args[0], 12345) + self.assertEqual(self.fake_issue_tracker.add_comment_kwargs['components'], + ['Foo>Bar']) + + def testMultipleAltertsNonoverlapThreshold(self, mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + self._CallHandler() + perf_test = 'ChromiumPerf/bot/test_suite/measurement/test_case' + + # Anomalies without range overlap. + a1 = self._AddAnomaly(start_revision=10, end_revision=40, test=perf_test) + a2 = self._AddAnomaly(start_revision=50, end_revision=150, test=perf_test) + a4 = self._AddAnomaly(start_revision=200, end_revision=300, test=perf_test) + self._CallHandler() + + # Anomaly that overlaps with first 2 alert groups. + a5 = self._AddAnomaly(start_revision=5, end_revision=100, test=perf_test) + + # Anomaly that exceeds nonoverlap threshold of all existing alert groups. 
+ a6 = self._AddAnomaly(start_revision=5, end_revision=305, test=perf_test) + self._CallHandler() + + # Anomaly that binds to a6's group. + a7 = self._AddAnomaly(start_revision=10, end_revision=300, test=perf_test) + self._CallHandler() + self._CallHandler() + + groups = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + ) + + anomaly_groups = [group.anomalies for group in groups] + expected_anomaly_groups = [[a1, a5], [a2, a5], [a4], [a6, a7]] + + self.assertCountEqual(anomaly_groups, expected_anomaly_groups) @mock.patch.object(utils, 'ServiceAccountEmail', lambda: _SERVICE_ACCOUNT_EMAIL) @mock.patch('dashboard.sheriff_config_client.GetSheriffConfigClient') +@mock.patch.object(sandwich_allowlist, 'CheckAllowlist', + testing_common.CheckSandwichAllowlist) class RecoveredAlertsTests(GroupReportTestBase): def __init__(self, *args, **kwargs): - super(RecoveredAlertsTests, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.anomalies = [] - def setUp(self): - super(RecoveredAlertsTests, self).setUp() - def InitAfterMocks(self): # First create the 'Ungrouped' AlertGroup. self._CallHandler() @@ -597,9 +971,10 @@ def testNoRecovered(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) self.InitAfterMocks() self._CallHandler() - logging.debug('Rendered:\n%s', self.fake_issue_tracker.new_bug_args[1]) - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 1 affected measurements in bot:') + logging.debug('Rendered:\n%s', + self.fake_issue_tracker.new_bug_kwargs['description']) + self.assertRegex(self.fake_issue_tracker.new_bug_kwargs['description'], + r'Top 1 affected measurements in bot:') def testClosesIssueOnAllRecovered(self, mock_get_sheriff_client): # Ensure that we close the issue if all regressions in the group have been @@ -607,17 +982,18 @@ def testClosesIssueOnAllRecovered(self, mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) self.InitAfterMocks() self._CallHandler() - logging.debug('Rendered:\n%s', self.fake_issue_tracker.new_bug_args[1]) - self.assertRegexpMatches(self.fake_issue_tracker.new_bug_args[1], - r'Top 1 affected measurements in bot:') + logging.debug('Rendered:\n%s', + self.fake_issue_tracker.new_bug_kwargs['description']) + self.assertRegex(self.fake_issue_tracker.new_bug_kwargs['description'], + r'Top 1 affected measurements in bot:') # Mark one of the anomalies recovered. 
recovered_anomaly = self.anomalies[0].get() recovered_anomaly.recovered = True recovered_anomaly.put() self._CallHandler() self.assertEqual(self.fake_issue_tracker.issue['state'], 'closed') - self.assertRegexpMatches( - self.fake_issue_tracker.add_comment_args[1], + self.assertRegex( + self.fake_issue_tracker.add_comment_kwargs['comment'], r'All regressions for this issue have been marked recovered; closing.') def testReopensClosedIssuesWithNewRegressions(self, mock_get_sheriff_client): @@ -634,13 +1010,14 @@ def testReopensClosedIssuesWithNewRegressions(self, mock_get_sheriff_client): end_revision=75, test='master/bot/test_suite/measurement/other_test_case') self._CallHandler() - logging.debug('Rendered:\n%s', self.fake_issue_tracker.add_comment_args[1]) + logging.debug('Rendered:\n%s', + self.fake_issue_tracker.add_comment_kwargs['comment']) self.assertEqual(self.fake_issue_tracker.issue["state"], 'open') - self.assertRegexpMatches( - self.fake_issue_tracker.add_comment_args[1], + self.assertRegex( + self.fake_issue_tracker.add_comment_kwargs['comment'], r'Reopened due to new regressions detected for this alert group:') - self.assertRegexpMatches(self.fake_issue_tracker.add_comment_args[1], - r'test_suite/measurement/other_test_case') + self.assertRegex(self.fake_issue_tracker.add_comment_kwargs['comment'], + r'test_suite/measurement/other_test_case') def testManualClosedIssuesWithNewRegressions(self, mock_get_sheriff_client): # pylint: disable=no-value-for-parameter @@ -663,19 +1040,60 @@ def testManualClosedIssuesWithNewRegressions(self, mock_get_sheriff_client): end_revision=75, test='master/bot/test_suite/measurement/other_test_case') self._CallHandler() - logging.debug('Rendered:\n%s', self.fake_issue_tracker.add_comment_args[1]) + logging.debug('Rendered:\n%s', + self.fake_issue_tracker.add_comment_kwargs['comment']) self.assertEqual(self.fake_issue_tracker.issue["state"], 'closed') - self.assertRegexpMatches(self.fake_issue_tracker.add_comment_args[1], - r'test_suite/measurement/other_test_case') + self.assertRegex(self.fake_issue_tracker.add_comment_kwargs['comment'], + r'test_suite/measurement/other_test_case') - def testStartAutoBisection(self, mock_get_sheriff_client): + @mock.patch.object(workflow_service, 'CreateExecution', return_value='fake-execution-id') + def testStartAutoBisection_Sandwiched_DoNotBisectYet(self, + mock_workflow_service, + mock_get_sheriff_client): self._SetUpMocks(mock_get_sheriff_client) mock_get_sheriff_client().Match.return_value = ([ - subscription.Subscription(name='sheriff', - auto_triage_enable=True, - auto_bisect_enable=True) + subscription.Subscription( + name='sheriff', auto_triage_enable=True, auto_bisect_enable=True) ], None) + self.assertIsNotNone(mock_workflow_service) + self._CallHandler() + # Add anomalies + self._AddAnomaly() + # Create Group + self._CallHandler() + # Update Group to associate alerts + self._CallHandler() + # Set Create timestamp to 2 hours ago + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) + group.put() + # Submit issue + self._CallHandler() + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + # Should not jump straight to bisecting because it should first run a + # verification workflow to verify the reported regression. 
+ self._CallHandler() + group = alert_group.AlertGroup.Get( + 'test_suite', + alert_group.AlertGroup.Type.test_suite, + )[0] + self.assertEqual(group.bisection_ids, []) + self.assertEqual(group.status, alert_group.AlertGroup.Status.sandwiched) + def testStartAutoBisection_NotSandwiched(self, mock_get_sheriff_client): + self._SetUpMocks(mock_get_sheriff_client) + mock_get_sheriff_client().Match.return_value = ([ + subscription.Subscription( + name='blocked-sheriff', + auto_triage_enable=True, + auto_bisect_enable=True) + ], None) self._CallHandler() # Add anomalies self._AddAnomaly() @@ -702,7 +1120,7 @@ def testStartAutoBisection(self, mock_get_sheriff_client): 'test_suite', alert_group.AlertGroup.Type.test_suite, )[0] - self.assertItemsEqual(group.bisection_ids, ['123456']) + self.assertEqual(group.bisection_ids, ['123456']) @mock.patch.object(utils, 'ServiceAccountEmail', @@ -751,17 +1169,19 @@ def testAlertGroups_OnePerProject(self): alert_group.AlertGroup.Type.test_suite, ) self.assertEqual(2, len(groups)) - self.assertItemsEqual(['chromium', 'v8'], [g.project_id for g in groups]) + self.assertCountEqual(['chromium', 'v8'], [g.project_id for g in groups]) for group in groups: group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) group.put() # And that we've filed two issues. self._CallHandler() - self.assertItemsEqual([{ + self.assertCountEqual([{ 'method': 'NewBug', - 'args': (mock.ANY, mock.ANY), + 'args': (), 'kwargs': { + 'title': mock.ANY, + 'description': mock.ANY, 'project': 'v8', 'cc': [], 'labels': mock.ANY, @@ -769,8 +1189,10 @@ def testAlertGroups_OnePerProject(self): }, }, { 'method': 'NewBug', - 'args': (mock.ANY, mock.ANY), + 'args': (), 'kwargs': { + 'title': mock.ANY, + 'description': mock.ANY, 'project': 'chromium', 'cc': [], 'labels': mock.ANY, @@ -801,10 +1223,12 @@ def testAlertGroups_NonChromium(self): group.created = datetime.datetime.utcnow() - datetime.timedelta(hours=2) group.put() self._CallHandler() - self.assertItemsEqual([{ + self.assertCountEqual([{ 'method': 'NewBug', - 'args': (mock.ANY, mock.ANY), + 'args': (), 'kwargs': { + 'title': mock.ANY, + 'description': mock.ANY, 'project': 'non-chromium', 'cc': [], 'labels': mock.ANY, diff --git a/dashboard/dashboard/alerts.py b/dashboard/dashboard/alerts.py index e9a684d563a..cfe653637c9 100644 --- a/dashboard/dashboard/alerts.py +++ b/dashboard/dashboard/alerts.py @@ -7,6 +7,7 @@ from __future__ import absolute_import import json +import six from google.appengine.datastore.datastore_query import Cursor from google.appengine.ext import ndb @@ -28,70 +29,75 @@ _MAX_ANOMALIES_TO_SHOW = 500 -class AlertsHandler(request_handler.RequestHandler): - """Shows an overview of recent anomalies for perf sheriffing.""" - - def get(self): - """Renders the UI for listing alerts.""" - self.RenderStaticHtml('alerts.html') - - def post(self): - """Returns dynamic data for listing alerts in response to XHR. - - Request parameters: - sheriff: The name of a sheriff (optional). - triaged: Whether to include triaged alerts (i.e. with a bug ID). - improvements: Whether to include improvement anomalies. - anomaly_cursor: Where to begin a paged query for anomalies (optional). - - Outputs: - JSON data for an XHR request to show a table of alerts. - """ - sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff') - if not _SheriffIsFound(sheriff_name): - self.response.out.write( - json.dumps({'error': 'Sheriff "%s" not found.' % sheriff_name})) - return - - # Cursors are used to fetch paged queries. 
If none is supplied, then the - # first 500 alerts will be returned. If a cursor is given, the next - # 500 alerts (starting at the given cursor) will be returned. - anomaly_cursor = self.request.get('anomaly_cursor', None) - if anomaly_cursor: - anomaly_cursor = Cursor(urlsafe=anomaly_cursor) - - is_improvement = None - if not bool(self.request.get('improvements')): - is_improvement = False - - bug_id = None - recovered = None - if not bool(self.request.get('triaged')): - bug_id = '' - recovered = False - - max_anomalies_to_show = _MAX_ANOMALIES_TO_SHOW - if self.request.get('max_anomalies_to_show'): - max_anomalies_to_show = int(self.request.get('max_anomalies_to_show')) - - anomalies, next_cursor, count = anomaly.Anomaly.QueryAsync( - start_cursor=anomaly_cursor, - subscriptions=[sheriff_name], - bug_id=bug_id, - is_improvement=is_improvement, - recovered=recovered, - count_limit=_MAX_ANOMALIES_TO_COUNT, - limit=max_anomalies_to_show).get_result() - - values = { - 'anomaly_list': AnomalyDicts(anomalies), - 'anomaly_count': count, - 'sheriff_list': _GetSheriffList(), - 'anomaly_cursor': (next_cursor.urlsafe() if next_cursor else None), - 'show_more_anomalies': next_cursor != None, - } - self.GetDynamicVariables(values) - self.response.out.write(json.dumps(values)) +# Shows an overview of recent anomalies for perf sheriffing. +from flask import make_response, request + + +def AlertsHandlerGet(): + """Renders the UI for listing alerts.""" + return request_handler.RequestHandlerRenderStaticHtml('alerts.html') + + +def AlertsHandlerPost(): + """Returns dynamic data for listing alerts in response to XHR. + + Request parameters: + sheriff: The name of a sheriff (optional). + triaged: Whether to include triaged alerts (i.e. with a bug ID). + improvements: Whether to include improvement anomalies. + anomaly_cursor: Where to begin a paged query for anomalies (optional). + + Outputs: + JSON data for an XHR request to show a table of alerts. + """ + sheriff_name = request.values.get('sheriff', None) + if sheriff_name and not _SheriffIsFound(sheriff_name): + return make_response( + json.dumps({'error': 'Sheriff "%s" not found.' % sheriff_name})) + + # Cursors are used to fetch paged queries. If none is supplied, then the + # first 500 alerts will be returned. If a cursor is given, the next + # 500 alerts (starting at the given cursor) will be returned. 
+ anomaly_cursor = request.values.get('anomaly_cursor', None) + if anomaly_cursor: + anomaly_cursor = Cursor(urlsafe=anomaly_cursor) + + is_improvement = None + if not bool(request.values.get('improvements')): + is_improvement = False + + bug_id = None + recovered = None + if not bool(request.values.get('triaged')): + bug_id = '' + recovered = False + + max_anomalies_to_show = _MAX_ANOMALIES_TO_SHOW + if request.values.get('max_anomalies_to_show'): + max_anomalies_to_show = int(request.values.get('max_anomalies_to_show')) + + subs = None + if sheriff_name: + subs = [sheriff_name] + anomalies, next_cursor, count = anomaly.Anomaly.QueryAsync( + start_cursor=anomaly_cursor, + subscriptions=subs, + bug_id=bug_id, + is_improvement=is_improvement, + recovered=recovered, + count_limit=_MAX_ANOMALIES_TO_COUNT, + limit=max_anomalies_to_show).get_result() + + values = { + 'anomaly_list': AnomalyDicts(anomalies), + 'anomaly_count': count, + 'sheriff_list': _GetSheriffList(), + 'anomaly_cursor': + (six.ensure_str(next_cursor.urlsafe()) if next_cursor else None), + 'show_more_anomalies': next_cursor != None, + } + request_handler.RequestHandlerGetDynamicVariables(values) + return make_response(json.dumps(values)) def _SheriffIsFound(sheriff_name): @@ -139,7 +145,7 @@ def GetAnomalyDict(anomaly_entity, bisect_status=None, v2=False): 'dashboard_link': dashboard_link, 'end_revision': anomaly_entity.end_revision, 'improvement': anomaly_entity.is_improvement, - 'key': anomaly_entity.key.urlsafe(), + 'key': six.ensure_str(anomaly_entity.key.urlsafe()), 'median_after_anomaly': anomaly_entity.median_after_anomaly, 'median_before_anomaly': anomaly_entity.median_before_anomaly, 'recovered': anomaly_entity.recovered, @@ -153,7 +159,7 @@ def GetAnomalyDict(anomaly_entity, bisect_status=None, v2=False): if anomaly_entity.internal_only: bug_labels.add('Restrict-View-Google') tags = set(bug_label_patterns.GetBugLabelsForTest(test_key)) - subscriptions = [s for s in anomaly_entity.subscriptions] + subscriptions = list(anomaly_entity.subscriptions) tags.update([l for s in subscriptions for l in s.bug_labels]) bug_components = set(c for s in subscriptions for c in s.bug_components) for tag in tags: @@ -195,6 +201,6 @@ def GetAnomalyDict(anomaly_entity, bisect_status=None, v2=False): def _GetBisectStatusDict(anomalies): """Returns a dictionary of bug ID to bisect status string.""" - bug_id_list = {a.bug_id for a in anomalies if a.bug_id > 0} + bug_id_list = {a.bug_id for a in anomalies if a.bug_id and a.bug_id > 0} bugs = ndb.get_multi(ndb.Key('Bug', b) for b in bug_id_list) return {b.key.id(): b.latest_bisect_status for b in bugs if b} diff --git a/dashboard/dashboard/alerts_test.py b/dashboard/dashboard/alerts_test.py index cca980002fe..f031ce35ed8 100644 --- a/dashboard/dashboard/alerts_test.py +++ b/dashboard/dashboard/alerts_test.py @@ -6,11 +6,10 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import mock import sys import unittest - -import webapp2 import webtest from dashboard import alerts @@ -21,15 +20,26 @@ from dashboard.models.subscription import Subscription from dashboard.sheriff_config_client import SheriffConfigClient +flask_app = Flask(__name__) + + +@flask_app.route('/alerts', methods=['GET']) +def AlertsHandlerGet(): + return alerts.AlertsHandlerGet() + + +@flask_app.route('/alerts', methods=['POST']) +def AlertsHandlerPost(): + return alerts.AlertsHandlerPost() + @mock.patch.object(SheriffConfigClient, '__init__', mock.MagicMock(return_value=None)) 
class AlertsTest(testing_common.TestCase): def setUp(self): - super(AlertsTest, self).setUp() - app = webapp2.WSGIApplication([('/alerts', alerts.AlertsHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetSheriffDomains(['chromium.org']) testing_common.SetIsInternalUser('internal@chromium.org', True) self.SetCurrentUser('internal@chromium.org', is_admin=True) @@ -147,7 +157,7 @@ def testV2(self): ).put().get() actual = alerts.GetAnomalyDict(alert, v2=True) del actual['dashboard_link'] - self.assertEqual( + self.assertCountEqual( { 'bug_components': ['component'], 'bug_id': 10, @@ -174,7 +184,7 @@ def testV2(self): def testGet(self): response = self.testapp.get('/alerts') self.assertEqual('text/html', response.content_type) - self.assertIn('Chrome Performance Alerts', response.body) + self.assertIn(b'Chrome Performance Alerts', response.body) def testPost_NoParametersSet_UntriagedAlertsListed(self): key_map = self._AddAlertsToDataStore() @@ -197,7 +207,7 @@ def testPost_NoParametersSet_UntriagedAlertsListed(self): for alert in anomaly_list: self.assertEqual(expected_end_rev, alert['end_revision']) self.assertEqual(expected_end_rev - 5, alert['start_revision']) - self.assertEqual(key_map[expected_end_rev], alert['key']) + self.assertEqual(key_map[expected_end_rev].decode(), alert['key']) self.assertEqual('ChromiumGPU', alert['master']) self.assertEqual('linux-release', alert['bot']) self.assertEqual('scrolling-benchmark', alert['testsuite']) diff --git a/dashboard/dashboard/api/alerts.py b/dashboard/dashboard/api/alerts.py index bb975f80d79..471c2bc0b89 100644 --- a/dashboard/dashboard/api/alerts.py +++ b/dashboard/dashboard/api/alerts.py @@ -6,24 +6,29 @@ from __future__ import division from __future__ import absolute_import +import six + from google.appengine.datastore import datastore_query from dashboard import alerts from dashboard.api import api_request_handler from dashboard.api import utils from dashboard.common import request_handler +from dashboard.common import utils as dashboard_utils from dashboard.models import anomaly from dashboard.models import report_template -class AlertsHandler(api_request_handler.ApiRequestHandler): - """API handler for various alert requests.""" +from flask import request + + +def _CheckUser(): + pass - def _CheckUser(self): - pass - def Post(self): - """Returns alert data in response to API requests. +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def AlertsPost(): + """Returns alert data in response to API requests. Possible list types: keys: A comma-separated list of urlsafe Anomaly keys. @@ -33,57 +38,58 @@ def Post(self): Outputs: Alerts data; see README.md. 
""" - alert_list = None - response = {} - try: - is_improvement = utils.ParseBool(self.request.get('is_improvement', None)) - recovered = utils.ParseBool(self.request.get('recovered', None)) - start_cursor = self.request.get('cursor', None) - if start_cursor: - start_cursor = datastore_query.Cursor(urlsafe=start_cursor) - min_timestamp = utils.ParseISO8601( - self.request.get('min_timestamp', None)) - max_timestamp = utils.ParseISO8601( - self.request.get('max_timestamp', None)) + alert_list = None + response = {} + try: + is_improvement = utils.ParseBool(request.values.get('is_improvement', None)) + recovered = utils.ParseBool(request.values.get('recovered', None)) + start_cursor = request.values.get('cursor', None) + if start_cursor: + start_cursor = datastore_query.Cursor(urlsafe=start_cursor) + min_timestamp = utils.ParseISO8601( + request.values.get('min_timestamp', None)) + max_timestamp = utils.ParseISO8601( + request.values.get('max_timestamp', None)) - test_keys = [] - for template_id in self.request.get_all('report'): - test_keys.extend(report_template.TestKeysForReportTemplate(template_id)) + test_keys = [] + for template_id in request.values.getlist('report'): + test_keys.extend(report_template.TestKeysForReportTemplate(template_id)) - try: - sheriff = self.request.get('sheriff', None) - alert_list, next_cursor, count = anomaly.Anomaly.QueryAsync( - bot_name=self.request.get('bot', None), - bug_id=self.request.get('bug_id', None), - is_improvement=is_improvement, - key=self.request.get('key', None), - limit=int(self.request.get('limit', 100)), - count_limit=int(self.request.get('count_limit', 0)), - master_name=self.request.get('master', None), - max_end_revision=self.request.get('max_end_revision', None), - max_start_revision=self.request.get('max_start_revision', None), - max_timestamp=max_timestamp, - min_end_revision=self.request.get('min_end_revision', None), - min_start_revision=self.request.get('min_start_revision', None), - min_timestamp=min_timestamp, - recovered=recovered, - subscriptions=[sheriff] if sheriff else None, - start_cursor=start_cursor, - test=self.request.get('test', None), - test_keys=test_keys, - test_suite_name=self.request.get('test_suite', None)).get_result() - response['count'] = count - except AssertionError: - alert_list, next_cursor = [], None - if next_cursor: - response['next_cursor'] = next_cursor.urlsafe() + try: + sheriff = request.values.get('sheriff', None) + alert_list, next_cursor, count = anomaly.Anomaly.QueryAsync( + bot_name=request.values.get('bot', None), + bug_id=request.values.get('bug_id', None), + is_improvement=is_improvement, + key=request.values.get('key', None), + limit=int(request.values.get('limit', 100)), + count_limit=int(request.values.get('count_limit', 0)), + master_name=request.values.get('master', None), + max_end_revision=request.values.get('max_end_revision', None), + max_start_revision=request.values.get('max_start_revision', None), + max_timestamp=max_timestamp, + min_end_revision=request.values.get('min_end_revision', None), + min_start_revision=request.values.get('min_start_revision', None), + min_timestamp=min_timestamp, + recovered=recovered, + subscriptions=[sheriff] if sheriff else None, + start_cursor=start_cursor, + test=request.values.get('test', None), + test_keys=test_keys, + test_suite_name=request.values.get('test_suite', None)).get_result() + response['count'] = count except AssertionError: - # The only known assertion is in InternalOnlyModel._post_get_hook when a - # non-internal user requests an 
internal-only entity. - raise api_request_handler.BadRequestError('Not found') - except request_handler.InvalidInputError as e: - raise api_request_handler.BadRequestError(e.message) + alert_list, next_cursor = [], None + if next_cursor: + response['next_cursor'] = next_cursor.urlsafe() + except AssertionError as e: + # The only known assertion is in InternalOnlyModel._post_get_hook when a + # non-internal user requests an internal-only entity. + six.raise_from(api_request_handler.BadRequestError('Not found'), e) + except request_handler.InvalidInputError as e: + six.raise_from(api_request_handler.BadRequestError(str(e)), e) - response['anomalies'] = alerts.AnomalyDicts( - alert_list, utils.ParseBool(self.request.get('v2', None))) - return response + response['anomalies'] = alerts.AnomalyDicts( + alert_list, utils.ParseBool(request.values.get('v2', None))) + response = dashboard_utils.ConvertBytesBeforeJsonDumps(response) + return response diff --git a/dashboard/dashboard/api/alerts_test.py b/dashboard/dashboard/api/alerts_test.py index 6ecf051f45d..27c3b3c4aab 100644 --- a/dashboard/dashboard/api/alerts_test.py +++ b/dashboard/dashboard/api/alerts_test.py @@ -7,6 +7,7 @@ from __future__ import absolute_import import datetime +from flask import Flask import json import unittest @@ -18,12 +19,19 @@ from dashboard.models import report_template from dashboard.models.subscription import Subscription +flask_app = Flask(__name__) + + +@flask_app.route('/api/alerts', methods=['POST', 'OPTIONS']) +def ApiAlertsPost(): + return alerts.AlertsPost() + class AlertsGeneralTest(testing_common.TestCase): def setUp(self): - super(AlertsGeneralTest, self).setUp() - self.SetUpApp([('/api/alerts', alerts.AlertsHandler)]) + super().setUp() + self.SetUpFlaskApp(flask_app) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) def _Post(self, **params): diff --git a/dashboard/dashboard/api/api_auth.py b/dashboard/dashboard/api/api_auth.py index c6f64414fae..ecdbfbb57cc 100644 --- a/dashboard/dashboard/api/api_auth.py +++ b/dashboard/dashboard/api/api_auth.py @@ -7,6 +7,7 @@ from __future__ import absolute_import import logging +import six from google.appengine.api import oauth @@ -37,7 +38,15 @@ # allow accessing the APIs by authorized users that generate tokens via # luci-auth command. '446450136466-2hr92jrq8e6i4tnsa56b52vacp7t3936.apps.googleusercontent.com', + # This oauth client id will become default LUCI auth at some point. + # https://chromium-review.googlesource.com/c/infra/luci/luci-go/+/4004539. + '446450136466-mj75ourhccki9fffaq8bc1e50di315po.apps.googleusercontent.com', ] +if utils.IsStagingEnvironment(): + OAUTH_CLIENT_ID_ALLOWLIST = [ + # Staging oauth client id for Pinpoint. 
+ '22573382977-u263jlijs2uiio0uq7qm7vso3vuh7ec5.apps.googleusercontent.com' + ] class ApiAuthException(Exception): @@ -47,26 +56,26 @@ class ApiAuthException(Exception): class OAuthError(ApiAuthException): def __init__(self): - super(OAuthError, self).__init__('User authentication error') + super().__init__('User authentication error') class NotLoggedInError(ApiAuthException): def __init__(self): - super(NotLoggedInError, self).__init__('User not authenticated') + super().__init__('User not authenticated') class InternalOnlyError(ApiAuthException): def __init__(self): - super(InternalOnlyError, self).__init__('User does not have access') + super().__init__('User does not have access') def Authorize(): try: email = utils.GetEmail() - except oauth.OAuthRequestError: - raise OAuthError + except oauth.OAuthRequestError as e: + six.raise_from(OAuthError, e) if not email: raise NotLoggedInError @@ -82,11 +91,11 @@ def Authorize(): logging.error('OAuth client id %s for user %s not in allowlist', client_id, email) raise OAuthError - except oauth.OAuthRequestError: + except oauth.OAuthRequestError as e: # Transient errors when checking the token result should result in HTTP 500, # so catch oauth.OAuthRequestError here, not oauth.Error (which would catch # both fatal and transient errors). - raise OAuthError + six.raise_from(OAuthError, e) logging.info('OAuth user logged in as: %s', email) if utils.IsInternalUser(): diff --git a/dashboard/dashboard/api/api_auth_test.py b/dashboard/dashboard/api/api_auth_test.py index 06af83efe06..9dc1f4ddc69 100644 --- a/dashboard/dashboard/api/api_auth_test.py +++ b/dashboard/dashboard/api/api_auth_test.py @@ -18,7 +18,7 @@ class ApiAuthTest(testing_common.TestCase): def setUp(self): - super(ApiAuthTest, self).setUp() + super().setUp() patcher = mock.patch.object(datastore_hooks, 'SetPrivilegedRequest') self.addCleanup(patcher.stop) diff --git a/dashboard/dashboard/api/api_request_handler.py b/dashboard/dashboard/api/api_request_handler.py index 5cf76a124dc..36c391ec04c 100644 --- a/dashboard/dashboard/api/api_request_handler.py +++ b/dashboard/dashboard/api/api_request_handler.py @@ -6,15 +6,15 @@ from __future__ import division from __future__ import absolute_import +import functools import json import logging import re import traceback -import webapp2 - from dashboard.api import api_auth from dashboard.common import utils +from flask import make_response, request _ALLOWED_ORIGINS = [ 'chromeperf.appspot.com', @@ -22,6 +22,11 @@ 'chromiumdash.appspot.com', 'chromiumdash-staging.googleplex.com', ] +if utils.IsStagingEnvironment(): + _ALLOWED_ORIGINS = [ + 'chromeperf-stage.uc.r.appspot.com', + 'pinpoint-dot-chromeperf-stage.uc.r.appspot.com', + ] class BadRequestError(Exception): @@ -31,114 +36,80 @@ class BadRequestError(Exception): class ForbiddenError(Exception): def __init__(self): - super(ForbiddenError, self).__init__('Access denied') + super().__init__('Access denied') class NotFoundError(Exception): def __init__(self): - super(NotFoundError, self).__init__('Not found') - - -class ApiRequestHandler(webapp2.RequestHandler): - """API handler for api requests. - - Convenience methods handling authentication errors and surfacing them. - """ - - def _CheckUser(self): - """Checks whether the user has permission to make requests. - - This method must be overridden by subclasses to perform access control. - - Raises: - api_auth.NotLoggedInError: The user was not logged in, - and must be to be to make this request. 
- api_auth.OAuthError: The request was not a valid OAuth request, - or the client ID was not in the allowlist. - ForbiddenError: The user does not have permission to make this request. - """ - raise NotImplementedError() - - def _CheckIsInternalUser(self): - if utils.IsDevAppserver(): - return - self._CheckIsLoggedIn() - if not utils.IsInternalUser(): - raise ForbiddenError() - - def _CheckIsLoggedIn(self): - if utils.IsDevAppserver(): - return - api_auth.Authorize() - - def post(self, *args): - """Returns alert data in response to API requests. - - Outputs: - JSON results. - """ - self._Respond(self.Post, *args) - - def get(self, *args): - self._Respond(self.Get, *args) - - def _Respond(self, cb, *args): - self._SetCorsHeadersIfAppropriate() - - try: - self._CheckUser() - except api_auth.NotLoggedInError as e: - self.WriteErrorMessage(e.message, 401) - return - except api_auth.OAuthError as e: - self.WriteErrorMessage(e.message, 403) - return - except ForbiddenError as e: - self.WriteErrorMessage(e.message, 403) - return - # Allow oauth.Error to manifest as HTTP 500. - - try: - results = cb(*args) - self.response.out.write(json.dumps(results)) - except NotFoundError as e: - self.WriteErrorMessage(e.message, 404) - except (BadRequestError, KeyError, TypeError, ValueError) as e: - self.WriteErrorMessage(e.message, 400) - except ForbiddenError as e: - self.WriteErrorMessage(e.message, 403) - - def options(self, *_): # pylint: disable=invalid-name - self._SetCorsHeadersIfAppropriate() - - def Get(self, *_): - raise NotImplementedError() - - def Post(self, *_): - raise NotImplementedError() - - def _SetCorsHeadersIfAppropriate(self): - self.response.headers['Content-Type'] = 'application/json; charset=utf-8' - set_cors_headers = False - origin = self.request.headers.get('Origin', '') - for allowed in _ALLOWED_ORIGINS: - dev_pattern = re.compile(r'https://[A-Za-z0-9-]+-dot-' + - re.escape(allowed)) - prod_pattern = re.compile(r'https://' + re.escape(allowed)) - if dev_pattern.match(origin) or prod_pattern.match(origin): - set_cors_headers = True - if not set_cors_headers: - return - self.response.headers.add_header('Access-Control-Allow-Origin', origin) - self.response.headers.add_header('Access-Control-Allow-Credentials', 'true') - self.response.headers.add_header('Access-Control-Allow-Methods', - 'GET,OPTIONS,POST') - self.response.headers.add_header('Access-Control-Allow-Headers', - 'Accept,Authorization,Content-Type') - self.response.headers.add_header('Access-Control-Max-Age', '3600') - - def WriteErrorMessage(self, message, status): + super().__init__('Not found') + + +def SafeOriginRegex(prefix, origin): + return re.compile(r'^' + prefix + re.escape(origin) + '$') + + +def RequestHandlerDecoratorFactory(user_checker): + + def RequestHandlerDecorator(request_handler): + + @functools.wraps(request_handler) + def Wrapper(*args): + if request.method == 'OPTIONS': + response = make_response() + _SetCorsHeadersIfAppropriate(request, response) + return response + + try: + user_checker() + except api_auth.NotLoggedInError as e: + return _WriteErrorMessage(str(e), 401) + except api_auth.OAuthError as e: + return _WriteErrorMessage(str(e), 403) + except ForbiddenError as e: + return _WriteErrorMessage(str(e), 403) + # Allow oauth.Error to manifest as HTTP 500. 
+ + try: + results = request_handler(*args) + except NotFoundError as e: + return _WriteErrorMessage(str(e), 404) + except (BadRequestError, KeyError, TypeError, ValueError) as e: + return _WriteErrorMessage(str(e), 400) + except ForbiddenError as e: + return _WriteErrorMessage(str(e), 403) + + response = make_response(json.dumps(results)) + _SetCorsHeadersIfAppropriate(request, response) + return response + + return Wrapper + + return RequestHandlerDecorator + + +def _SetCorsHeadersIfAppropriate(req, resp): + resp.headers['Content-Type'] = 'application/json; charset=utf-8' + set_cors_headers = False + origin = req.headers.get('Origin', '') + for allowed in _ALLOWED_ORIGINS: + dev_pattern = SafeOriginRegex(r'https://[A-Za-z0-9-]+-dot-', allowed) + prod_pattern = SafeOriginRegex(r'https://', allowed) + if dev_pattern.match(origin) or prod_pattern.match(origin): + set_cors_headers = True + if set_cors_headers: + resp.headers['Access-Control-Allow-Origin'] = origin + resp.headers['Access-Control-Allow-Credentials'] = 'true' + resp.headers['Access-Control-Allow-Methods'] = 'GET,OPTIONS,POST' + resp.headers[ + 'Access-Control-Allow-Headers'] = 'Accept,Authorization,Content-Type' + resp.headers['Access-Control-Max-Age'] = '3600' + + +def _WriteErrorMessage(message, status:int): + # Only log an error message if it's a 5xx error + if status >= 500: logging.error(traceback.format_exc()) - self.response.set_status(status) - self.response.out.write(json.dumps({'error': message})) + else: + logging.warning(traceback.format_exc()) + return make_response(json.dumps({'error': message}), status) diff --git a/dashboard/dashboard/api/api_request_handler_test.py b/dashboard/dashboard/api/api_request_handler_test.py index 8a2cac54bbc..c2a95f16324 100644 --- a/dashboard/dashboard/api/api_request_handler_test.py +++ b/dashboard/dashboard/api/api_request_handler_test.py @@ -6,45 +6,64 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json import mock import unittest -import webapp2 import webtest from dashboard.api import api_auth from dashboard.api import api_request_handler from dashboard.common import testing_common +from dashboard.common import utils +flask_app = Flask(__name__) -class TestApiRequestHandler(api_request_handler.ApiRequestHandler): - def _CheckUser(self): - return self._CheckIsInternalUser() +def CheckIsInternalUser(): + if utils.IsDevAppserver(): + return + api_auth.Authorize() + if not utils.IsInternalUser(): + raise api_request_handler.ForbiddenError() - def Post(self): - return {'foo': 'response'} +@flask_app.route('/api/test', methods=['POST', 'OPTIONS']) +def ApiTestPost(): + return ApiTestPostHandler() -class TestApiRequestHandlerForbidden(api_request_handler.ApiRequestHandler): - def _CheckUser(self): - return self._CheckIsInternalUser() +@api_request_handler.RequestHandlerDecoratorFactory(CheckIsInternalUser) +def ApiTestPostHandler(): + return {"foo": "response"} + + +@flask_app.route('/api/forbidden', methods=['POST']) +def ApiForbiddenPost(): + return ApiForbiddenPostHandler() + + +@api_request_handler.RequestHandlerDecoratorFactory(CheckIsInternalUser) +def ApiForbiddenPostHandler(): + raise api_request_handler.ForbiddenError() + + +@flask_app.route('/api/badrequest', methods=['POST']) +def ApiBadRequestPost(): + return ApiFBadRequestPostHandler() - def Post(self): - raise api_request_handler.ForbiddenError() + +@api_request_handler.RequestHandlerDecoratorFactory(CheckIsInternalUser) +def ApiFBadRequestPostHandler(): + raise 
api_request_handler.BadRequestError('foo') class ApiRequestHandlerTest(testing_common.TestCase): def setUp(self): - super(ApiRequestHandlerTest, self).setUp() - - app = webapp2.WSGIApplication([(r'/api/test', TestApiRequestHandler), - (r'/api/forbidden', - TestApiRequestHandlerForbidden)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) def testPost_Authorized_PostCalled(self): self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) @@ -59,21 +78,20 @@ def testPost_ForbiddenError_Raised(self): @mock.patch.object(api_auth, 'Authorize', mock.MagicMock(side_effect=api_auth.OAuthError)) - @mock.patch.object(TestApiRequestHandler, 'Post') - def testPost_Unauthorized_PostNotCalled(self, mock_post): + def testPost_Unauthorized_PostNotCalled(self): + post_handler = ApiTestPostHandler + post_handler = mock.MagicMock() response = self.Post('/api/test', status=403) self.assertEqual({'error': 'User authentication error'}, json.loads(response.body)) - self.assertFalse(mock_post.called) + self.assertFalse(post_handler.called) + @mock.patch.object(api_auth, 'Authorize') - @mock.patch.object( - TestApiRequestHandler, 'Post', - mock.MagicMock(side_effect=api_request_handler.BadRequestError('foo'))) def testPost_BadRequest_400(self, _): self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - response = self.Post('/api/test', status=400) + response = self.Post('/api/badrequest', status=400) self.assertEqual({'error': 'foo'}, json.loads(response.body)) @mock.patch.object(api_auth, 'Authorize', @@ -92,54 +110,63 @@ def testPost_NotLoggedInError_401(self): def testOptions_NoOrigin_HeadersNotSet(self): response = self.testapp.options('/api/test') - self.assertListEqual([('Content-Length', '0'), - ('Cache-Control', 'no-cache'), - ('Content-Type', 'application/json; charset=utf-8')], - response.headerlist) + expected_headers = [('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8')] + self.assertCountEqual(expected_headers, response.headerlist) def testOptions_InvalidOrigin_HeadersNotSet(self): api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com'] response = self.testapp.options( '/api/test', headers={'origin': 'https://bar.appspot.com'}) - self.assertListEqual([('Content-Length', '0'), - ('Cache-Control', 'no-cache'), - ('Content-Type', 'application/json; charset=utf-8')], - response.headerlist) + expected_headers = [('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8')] + self.assertCountEqual(expected_headers, response.headerlist) + + def testOptions_InvalidOriginWithSharedPrefix_HeadersNotSet(self): + api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com'] + response = self.testapp.options( + '/api/test', + headers={'origin': 'https://foo.appspot.com.blablabla.com'}) + expected_headers = [('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8')] + self.assertCountEqual(expected_headers, response.headerlist) def testPost_ValidProdOrigin_HeadersSet(self): api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com'] response = self.testapp.options( '/api/test', headers={'origin': 'https://foo.appspot.com'}) - self.assertListEqual( - [('Content-Length', '0'), ('Cache-Control', 'no-cache'), - ('Content-Type', 'application/json; charset=utf-8'), - ('Access-Control-Allow-Origin', 'https://foo.appspot.com'), - ('Access-Control-Allow-Credentials', 'true'), - ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'), - 
('Access-Control-Allow-Headers', 'Accept,Authorization,Content-Type'), - ('Access-Control-Max-Age', '3600')], response.headerlist) + expected_headers = [ + ('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8'), + ('Access-Control-Allow-Origin', 'https://foo.appspot.com'), + ('Access-Control-Allow-Credentials', 'true'), + ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'), + ('Access-Control-Allow-Headers', 'Accept,Authorization,Content-Type'), + ('Access-Control-Max-Age', '3600') + ] + self.assertCountEqual(expected_headers, response.headerlist) def testPost_ValidDevOrigin_HeadersSet(self): api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com'] response = self.testapp.options( '/api/test', headers={'origin': 'https://dev-simon-123jkjasdf-dot-foo.appspot.com'}) - self.assertListEqual( - [('Content-Length', '0'), ('Cache-Control', 'no-cache'), - ('Content-Type', 'application/json; charset=utf-8'), - ('Access-Control-Allow-Origin', - 'https://dev-simon-123jkjasdf-dot-foo.appspot.com'), - ('Access-Control-Allow-Credentials', 'true'), - ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'), - ('Access-Control-Allow-Headers', 'Accept,Authorization,Content-Type'), - ('Access-Control-Max-Age', '3600')], response.headerlist) + expected_headers = [('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8'), + ('Access-Control-Allow-Origin', + 'https://dev-simon-123jkjasdf-dot-foo.appspot.com'), + ('Access-Control-Allow-Credentials', 'true'), + ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'), + ('Access-Control-Allow-Headers', + 'Accept,Authorization,Content-Type'), + ('Access-Control-Max-Age', '3600')] + self.assertCountEqual(expected_headers, response.headerlist) def testPost_InvalidOrigin_HeadersNotSet(self): response = self.testapp.options('/api/test') - self.assertListEqual([('Content-Length', '0'), - ('Cache-Control', 'no-cache'), - ('Content-Type', 'application/json; charset=utf-8')], - response.headerlist) + expected_headers = [('Content-Length', '0'), + ('Content-Type', 'application/json; charset=utf-8')] + self.assertCountEqual(expected_headers, response.headerlist) if __name__ == '__main__': diff --git a/dashboard/dashboard/api/bugs.py b/dashboard/dashboard/api/bugs.py deleted file mode 100644 index dd9c9e31f46..00000000000 --- a/dashboard/dashboard/api/bugs.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard.api import api_request_handler -from dashboard.api import utils as api_utils -from dashboard.common import datastore_hooks -from dashboard.common import utils -from dashboard.services import issue_tracker_service - - -class BugsHandler(api_request_handler.ApiRequestHandler): - """API handler for bug requests. - - Convenience methods for getting bug data; only available to internal users. - """ - - def _CheckUser(self): - self._CheckIsLoggedIn() - if not datastore_hooks.IsUnalteredQueryPermitted(): - raise api_request_handler.ForbiddenError() - - def Post(self, *args, **kwargs): - """Returns alert data in response to API requests. - - Argument: - bug_id: issue id on the chromium issue tracker - - Outputs: - JSON data for the bug, see README.md. 
- """ - if len(args) == 0: - raise api_request_handler.BadRequestError('Invalid bug ID "None".') - bug_id = args[0] - - service = issue_tracker_service.IssueTrackerService( - utils.ServiceAccountHttp()) - project = kwargs.get('project', 'chromium') - - if bug_id == 'recent': - response = service.List( - project=project, - q='opened-after:today-5', - label='Type-Bug-Regression,Performance', - sort='-id') - return {'bugs': response.get('items', [])} - - try: - bug_id = int(bug_id) - except ValueError: - raise api_request_handler.BadRequestError('Invalid bug ID "%s".' % bug_id) - - try: - include_comments = api_utils.ParseBool( - self.request.get('include_comments', None)) - except ValueError: - raise api_request_handler.BadRequestError( - "value of |with_comments| should be 'true' or 'false'") - - issue = service.GetIssue(bug_id, project=project) - bisects = [] - - def _FormatDate(d): - if not d: - return '' - return d.isoformat() - - response = { - 'bug': { - 'author': issue.get('author', {}).get('name'), - 'owner': issue.get('owner', {}).get('name'), - 'legacy_bisects': [{ - 'status': b.status, - 'bot': b.bot, - 'bug_id': b.bug_id, - 'buildbucket_link': ( - 'https://chromeperf.appspot.com/buildbucket_job_status/%s' % - b.buildbucket_job_id), - 'command': b.GetConfigDict()['command'], - 'culprit': None, - 'metric': (b.results_data or {}).get('metric'), - 'started_timestamp': _FormatDate(b.last_ran_timestamp), - } for b in bisects], - 'cc': [cc.get('name') for cc in issue.get('cc', [])], - 'components': issue.get('components', []), - 'projectId': project, - 'id': bug_id, - 'labels': issue.get('labels', []), - 'published': issue.get('published'), - 'updated': issue.get('updated'), - 'state': issue.get('state'), - 'status': issue.get('status'), - 'summary': issue.get('summary'), - } - } - - if include_comments: - comments = service.GetIssueComments(bug_id) - response['bug']['comments'] = [{ - 'content': comment.get('content'), - 'author': comment.get('author'), - 'published': comment.get('published'), - } for comment in comments] - - return response - - -class BugsWithProjectHandler(BugsHandler): - - def Post(self, *args, **kwargs): - # We translate the order of the arguments, because the first arg is the - # project and the second is the bug id. - if len(args) != 2: - raise api_request_handler.BadRequestError( - 'Must have two non-empty arguments to URI.') - return super(BugsWithProjectHandler, self).Post(args[1], project=args[0]) diff --git a/dashboard/dashboard/api/bugs_test.py b/dashboard/dashboard/api/bugs_test.py deleted file mode 100644 index db1229ed0bd..00000000000 --- a/dashboard/dashboard/api/bugs_test.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import mock -import unittest - -from dashboard.api import api_auth -from dashboard.api import bugs -from dashboard.common import testing_common -from dashboard.common import utils - - -class MockIssueTrackerService(object): - """A fake version of IssueTrackerService that returns expected data.""" - - def __init__(self, http=None): - pass - - @classmethod - def List(cls, project='chromium', *unused_args, **unused_kwargs): - del project - return { - 'items': [ - { - 'id': 12345, - 'summary': '5% regression in bot/suite/x at 10000:20000', - 'state': 'open', - 'status': 'New', - 'author': { - 'name': 'exam...@google.com' - }, - }, - { - 'id': 13579, - 'summary': '1% regression in bot/suite/y at 10000:20000', - 'state': 'closed', - 'status': 'WontFix', - 'author': { - 'name': 'exam...@google.com' - }, - }, - ] - } - - @classmethod - def GetIssue(cls, bug_id, project='chromium'): - del bug_id - del project - return { - 'cc': [{ - 'kind': 'monorail#issuePerson', - 'htmlLink': 'https://bugs.chromium.org/u/1253971105', - 'name': 'user@chromium.org', - }, { - 'kind': 'monorail#issuePerson', - 'name': 'hello@world.org', - }], - 'labels': [ - 'Type-Bug', - 'Pri-3', - 'M-61', - ], - 'owner': { - 'kind': 'monorail#issuePerson', - 'htmlLink': 'https://bugs.chromium.org/u/49586776', - 'name': 'owner@chromium.org', - }, - 'id': 737355, - 'author': { - 'kind': 'monorail#issuePerson', - 'htmlLink': 'https://bugs.chromium.org/u/49586776', - 'name': 'author@chromium.org', - }, - 'state': 'closed', - 'status': 'Fixed', - 'summary': 'The bug title', - 'components': [ - 'Blink>ServiceWorker', - 'Foo>Bar', - ], - 'published': '2017-06-28T01:26:53', - 'updated': '2018-03-01T16:16:22', - } - - @classmethod - def GetIssueComments(cls, _, project='chromium'): - del project - return [{ - 'content': 'Comment one', - 'published': '2017-06-28T04:42:55', - 'author': 'comment-one-author@company.com', - }, { - 'content': 'Comment two', - 'published': '2017-06-28T10:16:14', - 'author': 'author-two@chromium.org' - }] - - -class BugsTest(testing_common.TestCase): - - def setUp(self): - super(BugsTest, self).setUp() - self.SetUpApp([ - (r'/api/bugs/p/(.+)/(.+)', bugs.BugsWithProjectHandler), - (r'/api/bugs/(.*)', bugs.BugsHandler), - ]) - - # Add a fake issue tracker service that we can get call values from. 
- self.original_service = bugs.issue_tracker_service.IssueTrackerService - bugs.issue_tracker_service = mock.MagicMock() - self.service = MockIssueTrackerService - bugs.issue_tracker_service.IssueTrackerService = self.service - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - - def tearDown(self): - super(BugsTest, self).tearDown() - bugs.issue_tracker_service.IssueTrackerService = self.original_service - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_WithValidBug_ShowsData(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self.Post('/api/bugs/123456?include_comments=true') - bug = self.GetJsonValue(response, 'bug') - self.assertEqual('The bug title', bug.get('summary')) - self.assertEqual(2, len(bug.get('cc'))) - self.assertEqual('hello@world.org', bug.get('cc')[1]) - self.assertEqual('Fixed', bug.get('status')) - self.assertEqual('closed', bug.get('state')) - self.assertEqual('author@chromium.org', bug.get('author')) - self.assertEqual('owner@chromium.org', bug.get('owner')) - self.assertEqual('2017-06-28T01:26:53', bug.get('published')) - self.assertEqual('2018-03-01T16:16:22', bug.get('updated')) - self.assertEqual('chromium', bug.get('projectId')) - self.assertEqual(2, len(bug.get('comments'))) - self.assertEqual('Comment two', bug.get('comments')[1].get('content')) - self.assertEqual('author-two@chromium.org', - bug.get('comments')[1].get('author')) - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_WithAlternateUrlWorksWithProjects(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self.Post('/api/bugs/p/chromium/123456?include_comments=true') - bug = self.GetJsonValue(response, 'bug') - self.assertEqual('The bug title', bug.get('summary')) - self.assertEqual(2, len(bug.get('cc'))) - self.assertEqual('hello@world.org', bug.get('cc')[1]) - self.assertEqual('Fixed', bug.get('status')) - self.assertEqual('closed', bug.get('state')) - self.assertEqual('author@chromium.org', bug.get('author')) - self.assertEqual('owner@chromium.org', bug.get('owner')) - self.assertEqual('2017-06-28T01:26:53', bug.get('published')) - self.assertEqual('2018-03-01T16:16:22', bug.get('updated')) - self.assertEqual('chromium', bug.get('projectId')) - self.assertEqual(2, len(bug.get('comments'))) - self.assertEqual('Comment two', bug.get('comments')[1].get('content')) - self.assertEqual('author-two@chromium.org', - bug.get('comments')[1].get('author')) - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_WithValidBugButNoComments(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - - response = self.Post('/api/bugs/123456') - bug = self.GetJsonValue(response, 'bug') - self.assertNotIn('comments', bug) - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_Recent(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self.assertEqual(MockIssueTrackerService.List()['items'], - self.GetJsonValue(self.Post('/api/bugs/recent'), 'bugs')) - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_WithInvalidBugIdParameter_ShowsError(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self.Post('/api/bugs/foo', status=400) - self.assertIn('Invalid bug ID \\"foo\\".', response.body) - - @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - def testPost_NoAccess_ShowsError(self): - 
self.SetCurrentUserOAuth(testing_common.EXTERNAL_USER) - response = self.Post('/api/bugs/foo', status=403) - self.assertIn('Access denied', response.body) - - def testPost_NoOauthUser(self): - self.SetCurrentUserOAuth(None) - self.Post('/api/bugs/12345', status=401) - - def testPost_BadOauthClientId(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self.SetCurrentClientIdOAuth('invalid') - self.Post('/api/bugs/12345', status=403) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/api/config.py b/dashboard/dashboard/api/config.py index f4baa1a64e8..146d1f26c0a 100644 --- a/dashboard/dashboard/api/config.py +++ b/dashboard/dashboard/api/config.py @@ -14,14 +14,14 @@ revision_info_client.REVISION_INFO_KEY, ] +from flask import request -class ConfigHandler(api_request_handler.ApiRequestHandler): +def _CheckUser(): + pass - def _CheckUser(self): - pass - - def Post(self): - key = self.request.get('key') - if key not in ALLOWLIST: - return - return namespaced_stored_object.Get(key) +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def ConfigHandlerPost(): + key = request.values.get('key') + if key not in ALLOWLIST: + return None + return namespaced_stored_object.Get(key) diff --git a/dashboard/dashboard/api/config_test.py b/dashboard/dashboard/api/config_test.py index eae11f01e9e..d62950890a6 100644 --- a/dashboard/dashboard/api/config_test.py +++ b/dashboard/dashboard/api/config_test.py @@ -6,6 +6,7 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json import unittest @@ -16,12 +17,19 @@ from dashboard.common import stored_object from dashboard.common import testing_common +flask_app = Flask(__name__) + + +@flask_app.route(r'/api/config', methods=['POST']) +def ConfigHandlerPost(): + return config.ConfigHandlerPost() + class ConfigTest(testing_common.TestCase): def setUp(self): - super(ConfigTest, self).setUp() - self.SetUpApp([(r'/api/config', config.ConfigHandler)]) + super().setUp() + self.SetUpFlaskApp(flask_app) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) external_key = namespaced_stored_object.NamespaceKey( config.ALLOWLIST[0], datastore_hooks.EXTERNAL) @@ -31,7 +39,7 @@ def setUp(self): stored_object.Set(internal_key, datastore_hooks.INTERNAL) def _Post(self, suite): - return json.loads(self.Post('/api/config?key=' + suite).body) + return json.loads(self.Post('/api/config', {'key': suite}).body) def testNotInAllowlist(self): self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) diff --git a/dashboard/dashboard/api/describe.py b/dashboard/dashboard/api/describe.py index ec671454354..b1bc44736f3 100644 --- a/dashboard/dashboard/api/describe.py +++ b/dashboard/dashboard/api/describe.py @@ -9,15 +9,16 @@ from dashboard import update_test_suite_descriptors from dashboard.api import api_request_handler +from flask import request -class DescribeHandler(api_request_handler.ApiRequestHandler): - """API handler for describing test suites.""" - def _CheckUser(self): - pass +def _CheckUser(): + pass - def Post(self): - master = self.request.get('master') - suite = self.request.get('test_suite') - return update_test_suite_descriptors.FetchCachedTestSuiteDescriptor( - master, suite) + +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def DescribePost(): + master = request.values.get('master') + suite = request.values.get('test_suite') + return update_test_suite_descriptors.FetchCachedTestSuiteDescriptor( + master, suite) diff --git 
a/dashboard/dashboard/api/describe_test.py b/dashboard/dashboard/api/describe_test.py index fb56c35920d..b3374f1f354 100644 --- a/dashboard/dashboard/api/describe_test.py +++ b/dashboard/dashboard/api/describe_test.py @@ -6,6 +6,7 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json import unittest @@ -18,12 +19,19 @@ TEST_SUITE_NAME = 'test_suite' +flask_app = Flask(__name__) + + +@flask_app.route(r'/api/describe', methods=['POST', 'OPTIONS']) +def DescribeHandlerPost(): + return describe.DescribePost() + class DescribeTest(testing_common.TestCase): def setUp(self): - super(DescribeTest, self).setUp() - self.SetUpApp([(r'/api/describe', describe.DescribeHandler)]) + super().setUp() + self.SetUpFlaskApp(flask_app) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) external_key = update_test_suite_descriptors.CacheKey( @@ -74,7 +82,7 @@ def setUp(self): }) def _Post(self, suite): - return json.loads(self.Post('/api/describe?test_suite=' + suite).body) + return json.loads(self.Post('/api/describe', {'test_suite': suite}).body) def testInternal(self): self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) @@ -90,5 +98,6 @@ def testAnonymous(self): self.assertEqual('external:bot', response['bots'][0]) + if __name__ == '__main__': unittest.main() diff --git a/dashboard/dashboard/api/examples/service_account.py b/dashboard/dashboard/api/examples/service_account.py index 720bd4f122c..97922170202 100644 --- a/dashboard/dashboard/api/examples/service_account.py +++ b/dashboard/dashboard/api/examples/service_account.py @@ -3,6 +3,14 @@ # found in the LICENSE file. """An example of using perf dashboard API with a service account. +############# DEPRECATION WARNING ############# +# Oauth2client is now deprecated. +# As of Oct 11th, 2022, we are migrating oauth2client to google-auth. +# Update on examples will be available later. +# Please contact our team for urgent needs: +# browser-perf-engprod@google.com +################################################# + Getting set up: 1. Install httplib2and oauth2client python modules: `pip install httplib2` diff --git a/dashboard/dashboard/api/examples/user_account.py b/dashboard/dashboard/api/examples/user_account.py index 2cce4953289..7ccad6adceb 100644 --- a/dashboard/dashboard/api/examples/user_account.py +++ b/dashboard/dashboard/api/examples/user_account.py @@ -3,6 +3,14 @@ # found in the LICENSE file. """An example of using perf dashboard API with your own user account. +############# DEPRECATION WARNING ############# +# Oauth2client is now deprecated. +# As of Oct 11th, 2022, we are migrating oauth2client to google-auth. +# Update on examples will be available later. +# Please contact our team for urgent needs: +# browser-perf-engprod@google.com +################################################# + Getting set up: 1. Install httplib2and oauth2client python modules: `pip install httplib2` @@ -22,6 +30,7 @@ import httplib2 from oauth2client import client +from six.moves import input # pylint:disable=redefined-builtin # See security notes about why the 'secret' doesn't need to be kept secret here: # https://cs.chromium.org/chromium/tools/depot_tools/auth.py @@ -33,6 +42,15 @@ SCOPES = ['https://www.googleapis.com/auth/userinfo.email'] +############# DEPRECATION WARNING ############# +# Request using OOB flow will be blocked from Oct 3, 2022. 
+# More info: +# https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob +# Please do *not* refer to this example. We will update it with a +# valid solution. +# For urgent request, please contact our team directly: +# browser-perf-engprod@google.com +############# DEPRECATION WARNING ############# def MakeApiRequest(): flow = client.OAuth2WebServerFlow( OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, SCOPES, approval_prompt='force') @@ -41,8 +59,7 @@ def MakeApiRequest(): print('Go to the following link in your browser:\n\n' ' %s\n' % authorize_url) - # pylint:disable=raw_input-builtin - code = raw_input('Enter verification code: ').strip() + code = input('Enter verification code: ').strip() try: creds = flow.step2_exchange(code) except client.FlowExchangeError as e: diff --git a/dashboard/dashboard/api/existing_bug.py b/dashboard/dashboard/api/existing_bug.py deleted file mode 100644 index b6e3ee7511d..00000000000 --- a/dashboard/dashboard/api/existing_bug.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from google.appengine.ext import ndb - -from dashboard import associate_alerts -from dashboard.api import api_request_handler -from dashboard.common import utils - - -class ExistingBugHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - if not utils.IsValidSheriffUser(): - raise api_request_handler.ForbiddenError() - - def Post(self): - keys = self.request.get_all('key') - bug_id = int(self.request.get('bug')) - project_id = self.request.get('project_id', 'chromium') - alerts = ndb.get_multi([ndb.Key(urlsafe=k) for k in keys]) - associate_alerts.AssociateAlerts(bug_id, project_id, alerts) - return {} diff --git a/dashboard/dashboard/api/existing_bug_test.py b/dashboard/dashboard/api/existing_bug_test.py deleted file mode 100644 index d152bc8060e..00000000000 --- a/dashboard/dashboard/api/existing_bug_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -# Importing mock_oauth2_decorator before file_bug mocks out -# OAuth2Decorator usage in that file. 
-# pylint: disable=unused-import -from dashboard import mock_oauth2_decorator -# pylint: enable=unused-import - -from dashboard.api import api_auth -from dashboard.api import existing_bug -from dashboard.common import testing_common -from dashboard.models import anomaly -from dashboard.models import graph_data - - -class ExistingBugTest(testing_common.TestCase): - - def setUp(self): - super(ExistingBugTest, self).setUp() - self.SetUpApp([('/api/existing_bug', existing_bug.ExistingBugHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - self.SetCurrentUserOAuth(None) - testing_common.SetSheriffDomains(['example.com']) - - def _Post(self, **params): - return json.loads(self.Post('/api/existing_bug', params).body) - - def testInvalidUser(self): - self.Post('/api/existing_bug', status=403) - - def testSuccess(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - path = 'm/b/s/m/c' - test = graph_data.TestMetadata( - has_rows=True, - id=path, - improvement_direction=anomaly.DOWN, - units='units') - test.put() - key = anomaly.Anomaly(test=test.key, start_revision=1, end_revision=1).put() - graph_data.Row(id=1, parent=test.key, value=1).put() - response = self._Post(key=key.urlsafe(), bug=12345) - self.assertEqual({}, response) - self.assertEqual(12345, key.get().bug_id) - self.assertEqual('chromium', key.get().project_id) diff --git a/dashboard/dashboard/api/list_timeseries.py b/dashboard/dashboard/api/list_timeseries.py deleted file mode 100644 index f48a9197261..00000000000 --- a/dashboard/dashboard/api/list_timeseries.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard.api import api_request_handler -from dashboard.common import utils -from dashboard.models import graph_data - - -class ListTimeseriesHandler(api_request_handler.ApiRequestHandler): - """API handler for listing timeseries for a benchmark.""" - - def _CheckUser(self): - self._CheckIsLoggedIn() - - def Post(self, *args): - """Returns list in response to API requests. - - Argument: - benchmark: name of the benchmark to list tests for - - Outputs: - JSON list of monitored timeseries for the benchmark, see README.md. - """ - benchmark = args[0] - sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff') - query = graph_data.TestMetadata.query() - query = query.filter(graph_data.TestMetadata.suite_name == benchmark) - query = query.filter(graph_data.TestMetadata.has_rows == True) - query = query.filter(graph_data.TestMetadata.deprecated == False) - if sheriff_name and sheriff_name != 'all': - print(sheriff_name, 'xxxxxxxxxxxxxxxxxxxxxxxx') - raise api_request_handler.BadRequestError( - 'Not supporting sheriff name anymore. Use `all` instead.') - - keys = query.fetch(keys_only=True) - return [utils.TestPath(key) for key in keys] diff --git a/dashboard/dashboard/api/list_timeseries_test.py b/dashboard/dashboard/api/list_timeseries_test.py deleted file mode 100644 index a0ecdc728b0..00000000000 --- a/dashboard/dashboard/api/list_timeseries_test.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import unittest - -from dashboard.api import api_auth -from dashboard.api import list_timeseries -from dashboard.common import testing_common - - -class ListTimeseriesTest(testing_common.TestCase): - - def setUp(self): - super(ListTimeseriesTest, self).setUp() - self.SetUpApp([(r'/api/list_timeseries/(.*)', - list_timeseries.ListTimeseriesHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - - def _AddData(self): - """Adds sample TestMetadata entities and returns their keys.""" - testing_common.AddTests( - ['ChromiumPerf'], ['linux', 'win', 'mac'], { - 'v8': { - 'sunspider': { - 'Total': {} - }, - 'octane': { - 'Total': {} - }, - 'memory': { - 'Total': {} - }, - }, - 'page_cycler': { - 'warm': { - 'cnn': {}, - 'facebook': {}, - 'yahoo': {} - }, - 'cold': { - 'nytimes': {}, - 'cnn': {}, - 'yahoo': {} - } - } - }) - - for bot in ['linux', 'win', 'mac']: - for path in ['sunspider/Total', 'octane/Total', 'octane', 'memory/Total']: - testing_common.AddRows('ChromiumPerf/%s/v8/%s' % (bot, path), - [200, 300, 400, 500]) - for page in [ - 'warm/cnn', 'warm/facebook', 'warm/yahoo', 'cold/nytimes', 'cold/cnn', - 'cold/yahoo' - ]: - testing_common.AddRows('ChromiumPerf/%s/page_cycler/%s' % (bot, page), - [100, 200, 300]) - - def testPost_External(self): - self.SetCurrentUserOAuth(testing_common.EXTERNAL_USER) - self._AddData() - - response = self.Post('/api/list_timeseries/v8', {'sheriff': 'all'}) - paths = json.loads(response.body) - self.assertEqual( - { - 'ChromiumPerf/mac/v8/sunspider/Total', - 'ChromiumPerf/mac/v8/octane/Total', - 'ChromiumPerf/mac/v8/octane', - 'ChromiumPerf/mac/v8/memory/Total', - 'ChromiumPerf/linux/v8/sunspider/Total', - 'ChromiumPerf/linux/v8/octane/Total', - 'ChromiumPerf/linux/v8/octane', - 'ChromiumPerf/linux/v8/memory/Total', - 'ChromiumPerf/win/v8/sunspider/Total', - 'ChromiumPerf/win/v8/octane/Total', - 'ChromiumPerf/win/v8/octane', - 'ChromiumPerf/win/v8/memory/Total', - }, set(paths)) - - def testPost_AllSheriff_ListsAllV8Perf(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self._AddData() - - response = self.Post('/api/list_timeseries/v8', {'sheriff': 'all'}) - paths = json.loads(response.body) - self.assertEqual( - { - 'ChromiumPerf/mac/v8/sunspider/Total', - 'ChromiumPerf/mac/v8/octane/Total', - 'ChromiumPerf/mac/v8/octane', - 'ChromiumPerf/mac/v8/memory/Total', - 'ChromiumPerf/linux/v8/sunspider/Total', - 'ChromiumPerf/linux/v8/octane/Total', - 'ChromiumPerf/linux/v8/octane', - 'ChromiumPerf/linux/v8/memory/Total', - 'ChromiumPerf/win/v8/sunspider/Total', - 'ChromiumPerf/win/v8/octane/Total', - 'ChromiumPerf/win/v8/octane', - 'ChromiumPerf/win/v8/memory/Total', - }, set(paths)) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/api/new_bug.py b/dashboard/dashboard/api/new_bug.py deleted file mode 100644 index 1b19835d58c..00000000000 --- a/dashboard/dashboard/api/new_bug.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard.api import api_request_handler -from dashboard.api import utils as api_utils -from dashboard.common import file_bug -from dashboard.common import utils - - -class NewBugHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - if not utils.IsValidSheriffUser(): - raise api_request_handler.ForbiddenError() - - def Post(self): - owner = self.request.get('owner') - cc = self.request.get('cc') - summary = self.request.get('summary') - description = self.request.get('description') - project = self.request.get('project', 'chromium') - labels = self.request.get_all('label') - components = self.request.get_all('component') - keys = self.request.get_all('key') - bisect = api_utils.ParseBool(self.request.get('bisect', 'true')) - http = utils.ServiceAccountHttp() - - return file_bug.FileBug(http, owner, cc, summary, description, project, - labels, components, keys, bisect) diff --git a/dashboard/dashboard/api/new_bug_test.py b/dashboard/dashboard/api/new_bug_test.py deleted file mode 100644 index 7222069640a..00000000000 --- a/dashboard/dashboard/api/new_bug_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import mock - -from dashboard.api import api_auth -from dashboard.api import new_bug -from dashboard.common import testing_common -from dashboard.models import anomaly -from dashboard.models import graph_data - - -class NewBugTest(testing_common.TestCase): - - def setUp(self): - super(NewBugTest, self).setUp() - self.SetUpApp([('/api/new_bug', new_bug.NewBugHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - self.SetCurrentUserOAuth(None) - testing_common.SetSheriffDomains(['example.com']) - self.PatchObject(new_bug.utils, 'ServiceAccountHttp', - mock.Mock(return_value=None)) - self._issue_tracker_service = testing_common.FakeIssueTrackerService() - self.PatchObject(new_bug.file_bug.issue_tracker_service, - 'IssueTrackerService', - lambda *_: self._issue_tracker_service) - self.PatchObject(new_bug.file_bug.app_identity, - 'get_default_version_hostname', mock.Mock(return_value='')) - - def _Post(self, **params): - return json.loads(self.Post('/api/new_bug', params).body) - - def testInvalidUser(self): - self.Post('/api/new_bug', status=403) - - @mock.patch.object(new_bug.file_bug.auto_bisect, 'StartNewBisectForBug', - mock.MagicMock()) - def testSuccess(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - path = 'm/b/s/m/c' - test = graph_data.TestMetadata( - has_rows=True, - id=path, - improvement_direction=anomaly.DOWN, - units='units') - test.put() - key = anomaly.Anomaly( - test=test.key, start_revision=1, end_revision=1).put().urlsafe() - graph_data.Row(id=1, parent=test.key, value=1).put() - response = self._Post(key=key) - self.assertEqual(12345, response['bug_id']) - - @mock.patch.object(new_bug.file_bug.auto_bisect, 'StartNewBisectForBug', - mock.MagicMock()) - def testHasCC(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - path = 'm/b/s/m/c' - test = graph_data.TestMetadata( - has_rows=True, - id=path, - improvement_direction=anomaly.DOWN, - units='units') - test.put() - key = anomaly.Anomaly( - 
test=test.key, start_revision=1, end_revision=1).put().urlsafe() - graph_data.Row(id=1, parent=test.key, value=1).put() - response = self._Post(key=key, cc='user@example.com,other@example.com') - self.assertEqual(12345, response['bug_id']) diff --git a/dashboard/dashboard/api/new_pinpoint.py b/dashboard/dashboard/api/new_pinpoint.py deleted file mode 100644 index 4851ec6f979..00000000000 --- a/dashboard/dashboard/api/new_pinpoint.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard import pinpoint_request -from dashboard.api import api_request_handler -from dashboard.common import descriptor -from dashboard.common import utils - - -# This is just like PinpointNewBisectRequestHandler (/pinpoint/new/bisect), -# except 1. this is dispatched for /api/new_pinpoint, so utils.GetEmail() uses -# OAuth instead of cookies, and 2. this accepts a Descriptor instead of a test -# path. -class NewPinpointHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - if not utils.IsValidSheriffUser(): - raise api_request_handler.ForbiddenError() - - def Post(self): - params = dict((a, self.request.get(a)) for a in self.request.arguments()) - desc = descriptor.Descriptor(params['suite'], params['measurement'], - params['bot'], params.get('case'), - params.get('statistic')) - params['test_path'] = list(desc.ToTestPathsSync())[0] - # TODO Find the first test_path that exists, maybe strip statistic. - params['story_filter'] = params.get('case') - return pinpoint_request.NewPinpointBisect(params) diff --git a/dashboard/dashboard/api/nudge_alert.py b/dashboard/dashboard/api/nudge_alert.py deleted file mode 100644 index e3fb36965b8..00000000000 --- a/dashboard/dashboard/api/nudge_alert.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from google.appengine.ext import ndb - -from dashboard.api import api_request_handler -from dashboard.common import utils - - -class NudgeAlertHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - if not utils.IsValidSheriffUser(): - raise api_request_handler.ForbiddenError() - - def Post(self): - keys = self.request.get_all('key') - start = self.request.get('new_start_revision') - end = self.request.get('new_end_revision') - try: - start = int(start) - end = int(end) - except ValueError: - return {'error': 'Invalid revisions %s, %s' % (start, end)} - alerts = ndb.get_multi([ndb.Key(urlsafe=k) for k in keys]) - for a in alerts: - a.start_revision = start - a.end_revision = end - ndb.put_multi(alerts) - return {} diff --git a/dashboard/dashboard/api/nudge_alert_test.py b/dashboard/dashboard/api/nudge_alert_test.py deleted file mode 100644 index 4457602e54b..00000000000 --- a/dashboard/dashboard/api/nudge_alert_test.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2019 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -# Importing mock_oauth2_decorator before file_bug mocks out -# OAuth2Decorator usage in that file. -# pylint: disable=unused-import -from dashboard import mock_oauth2_decorator -# pylint: enable=unused-import - -from dashboard.api import api_auth -from dashboard.api import nudge_alert -from dashboard.common import testing_common -from dashboard.models import anomaly -from dashboard.models import graph_data - - -class NudgeAlertTest(testing_common.TestCase): - - def setUp(self): - super(NudgeAlertTest, self).setUp() - self.SetUpApp([('/api/nudge_alert', nudge_alert.NudgeAlertHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - self.SetCurrentUserOAuth(None) - testing_common.SetSheriffDomains(['example.com']) - - def _Post(self, **params): - return json.loads(self.Post('/api/nudge_alert', params).body) - - def testInvalidUser(self): - self.Post('/api/nudge_alert', status=403) - - def testSuccess(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - path = 'm/b/s/m/c' - test = graph_data.TestMetadata( - has_rows=True, - id=path, - improvement_direction=anomaly.DOWN, - units='units') - test.put() - key = anomaly.Anomaly(test=test.key, start_revision=1, end_revision=1).put() - graph_data.Row(id=1, parent=test.key, value=1).put() - response = self._Post( - key=key.urlsafe(), new_start_revision=3, new_end_revision=4) - self.assertEqual({}, response) - self.assertEqual(3, key.get().start_revision) - self.assertEqual(4, key.get().end_revision) diff --git a/dashboard/dashboard/api/report_generate.py b/dashboard/dashboard/api/report_generate.py deleted file mode 100644 index 9c4f48df42a..00000000000 --- a/dashboard/dashboard/api/report_generate.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard.api import api_request_handler -# Module imported for its side effects, to register static report templates. -import dashboard.common.system_health_report # pylint: disable=unused-import -from dashboard.models import report_template - - -class ReportGenerateHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - pass - - def Post(self): - revisions = self.request.get('revisions', None) - if revisions is None: - raise api_request_handler.BadRequestError - try: - revisions = [int(r) if r != 'latest' else r for r in revisions.split(',')] - except ValueError: - raise api_request_handler.BadRequestError - - try: - template_id = int(self.request.get('id')) - except ValueError: - raise api_request_handler.BadRequestError - try: - report = report_template.GetReport(template_id, revisions) - except AssertionError: - # The caller has requested internal-only data but is not authorized. - raise api_request_handler.NotFoundError - if report is None: - raise api_request_handler.NotFoundError - - return report diff --git a/dashboard/dashboard/api/report_generate_test.py b/dashboard/dashboard/api/report_generate_test.py deleted file mode 100644 index ced9d473bb2..00000000000 --- a/dashboard/dashboard/api/report_generate_test.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import datetime -import json -import unittest - -from dashboard.api import api_auth -from dashboard.api import report_generate -from dashboard.common import testing_common -from dashboard.models import report_template - - -@report_template.Static( - internal_only=False, - template_id=421533545, - name='Test:External', - modified=datetime.datetime.now()) -def _External(unused_revisions): - return {'url': 'external'} - - -@report_template.Static( - internal_only=True, - template_id=577335040, - name='Test:Internal', - modified=datetime.datetime.now()) -def _Internal(unused_revisions): - return {'url': 'internal'} - - -class ReportGenerateTest(testing_common.TestCase): - - def setUp(self): - super(ReportGenerateTest, self).setUp() - self.SetUpApp([ - ('/api/report/generate', report_generate.ReportGenerateHandler), - ]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - - def _Post(self, **params): - return json.loads(self.Post('/api/report/generate', params).body) - - def testInvalid(self): - self.Post('/api/report/generate', dict(), status=400) - self.Post('/api/report/generate', dict(revisions='a'), status=400) - self.Post('/api/report/generate', dict(revisions='0'), status=400) - self.Post('/api/report/generate', dict(revisions='0', id='x'), status=400) - self.Post('/api/report/generate', dict(revisions='0', id='1'), status=404) - - def testInternal_GetReport(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self._Post(revisions='latest', id=577335040) - self.assertEqual({'url': 'internal'}, response['report']) - self.assertEqual(577335040, response['id']) - self.assertEqual('Test:Internal', response['name']) - self.assertEqual(True, response['internal']) - - def testAnonymous_GetReport(self): - self.SetCurrentUserOAuth(None) - self.Post( - '/api/report/generate', - dict(revisions='latest', id=577335040), - status=404) - response = self._Post(revisions='latest', id=421533545) - self.assertEqual({'url': 'external'}, response['report']) - self.assertEqual(421533545, response['id']) - self.assertEqual('Test:External', response['name']) - self.assertEqual(False, response['internal']) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/api/report_names.py b/dashboard/dashboard/api/report_names.py deleted file mode 100644 index 578760c4c47..00000000000 --- a/dashboard/dashboard/api/report_names.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -from dashboard.api import api_request_handler -# Module imported for its side effects, to register static report templates. 
-import dashboard.common.system_health_report # pylint: disable=unused-import -from dashboard.models import report_template - - -class ReportNamesHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - pass - - def Post(self): - return report_template.List() diff --git a/dashboard/dashboard/api/report_names_test.py b/dashboard/dashboard/api/report_names_test.py deleted file mode 100644 index 8e73a6be32b..00000000000 --- a/dashboard/dashboard/api/report_names_test.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import unittest - -from dashboard.api import api_auth -from dashboard.api import report_names -from dashboard.common import testing_common -from dashboard.models import report_template - - -class ReportNamesTest(testing_common.TestCase): - - def setUp(self): - super(ReportNamesTest, self).setUp() - self.SetUpApp([('/api/report_names', report_names.ReportNamesHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - report_template.ReportTemplate(internal_only=False, name='external').put() - report_template.ReportTemplate(internal_only=True, name='internal').put() - - def testInternal(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = json.loads(self.Post('/api/report_names').body) - names = [d['name'] for d in response] - self.assertIn('external', names) - self.assertIn('internal', names) - - def testAnonymous(self): - self.SetCurrentUserOAuth(None) - response = json.loads(self.Post('/api/report_names').body) - names = [d['name'] for d in response] - self.assertIn('external', names) - self.assertNotIn('internal', names) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/api/report_template.py b/dashboard/dashboard/api/report_template.py deleted file mode 100644 index 6a32f3bb757..00000000000 --- a/dashboard/dashboard/api/report_template.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -from dashboard.api import api_request_handler -from dashboard.models import report_template - - -class ReportTemplateHandler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - self._CheckIsInternalUser() - - def Post(self): - template = json.loads(self.request.get('template')) - name = self.request.get('name', None) - owners = self.request.get('owners', None) - if template is None or name is None or owners is None: - raise api_request_handler.BadRequestError - - owners = owners.split(',') - template_id = self.request.get('id', None) - if template_id is not None: - try: - template_id = int(template_id) - except ValueError: - raise api_request_handler.BadRequestError - try: - report_template.PutTemplate(template_id, name, owners, template) - except ValueError: - raise api_request_handler.BadRequestError - return report_template.List() diff --git a/dashboard/dashboard/api/report_template_test.py b/dashboard/dashboard/api/report_template_test.py deleted file mode 100644 index fdfd7bb012a..00000000000 --- a/dashboard/dashboard/api/report_template_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2018 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import unittest - -from google.appengine.ext import ndb - -from dashboard.api import api_auth -from dashboard.api import report_template as api_report_template -from dashboard.common import testing_common -from dashboard.models import report_template - - -class ReportTemplateTest(testing_common.TestCase): - - def setUp(self): - super(ReportTemplateTest, self).setUp() - self.SetUpApp([ - ('/api/report/template', api_report_template.ReportTemplateHandler), - ]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - - def _Post(self, **params): - return json.loads(self.Post('/api/report/template', params).body) - - def testUnprivileged(self): - self.Post( - '/api/report/template', - dict( - owners=testing_common.INTERNAL_USER.email(), - name='Test:New', - template=json.dumps({'rows': []})), - status=403) - - def testInvalid(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self.Post( - '/api/report/template', - dict(template=json.dumps({'rows': []})), - status=400) - self.Post( - '/api/report/template', - dict(name='name', template=json.dumps({'rows': []})), - status=400) - self.Post( - '/api/report/template', - dict(owners='o', template=json.dumps({'rows': []})), - status=400) - - def testInternal_PutTemplate(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self._Post( - owners=testing_common.INTERNAL_USER.email(), - name='Test:New', - template=json.dumps({'rows': []})) - names = [d['name'] for d in response] - self.assertIn('Test:New', names) - - template = report_template.ReportTemplate.query( - report_template.ReportTemplate.name == 'Test:New').get() - self.assertEqual({'rows': []}, template.template) - - def testInternal_UpdateTemplate(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self._Post( - owners=testing_common.INTERNAL_USER.email(), - name='Test:New', - template=json.dumps({'rows': []})) - new_id = [info['id'] for info in response if info['name'] == 'Test:New'][0] - response = 
self._Post( - owners=testing_common.INTERNAL_USER.email(), - name='Test:Updated', - id=new_id, - template=json.dumps({'rows': []})) - template = ndb.Key('ReportTemplate', new_id).get() - self.assertEqual('Test:Updated', template.name) - - def testAnonymous_PutTemplate(self): - self.SetCurrentUserOAuth(None) - self.Post( - '/api/report/template', - dict(template=json.dumps({'rows': []}), name='n', owners='o'), - status=401) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/api/test_suites.py b/dashboard/dashboard/api/test_suites.py index 192950a484d..12f9e8bd385 100644 --- a/dashboard/dashboard/api/test_suites.py +++ b/dashboard/dashboard/api/test_suites.py @@ -10,11 +10,10 @@ from dashboard.api import api_request_handler -class TestSuitesHandler(api_request_handler.ApiRequestHandler): - """API handler for listing test suites.""" +def _CheckUser(): + pass - def _CheckUser(self): - pass - def Post(self): - return update_test_suites.FetchCachedTestSuites2() +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def TestSuitesPost(): + return update_test_suites.FetchCachedTestSuites2() diff --git a/dashboard/dashboard/api/test_suites_test.py b/dashboard/dashboard/api/test_suites_test.py index 33344ac1a15..da57d8921ff 100644 --- a/dashboard/dashboard/api/test_suites_test.py +++ b/dashboard/dashboard/api/test_suites_test.py @@ -6,6 +6,7 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json import unittest @@ -17,12 +18,19 @@ from dashboard.common import stored_object from dashboard.common import testing_common +flask_app = Flask(__name__) + + +@flask_app.route('/api/test_suites', methods=['POST', 'OPTIONS']) +def TestSuitesPost(): + return test_suites.TestSuitesPost() + class TestSuitesTest(testing_common.TestCase): def setUp(self): - super(TestSuitesTest, self).setUp() - self.SetUpApp([('/api/test_suites', test_suites.TestSuitesHandler)]) + super().setUp() + self.SetUpFlaskApp(flask_app) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) external_key = namespaced_stored_object.NamespaceKey( update_test_suites.TEST_SUITES_2_CACHE_KEY, datastore_hooks.EXTERNAL) diff --git a/dashboard/dashboard/api/timeseries.py b/dashboard/dashboard/api/timeseries.py deleted file mode 100644 index 414bd0b6348..00000000000 --- a/dashboard/dashboard/api/timeseries.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import datetime - -from dashboard.api import api_request_handler -from dashboard.common import datastore_hooks -from dashboard.common import namespaced_stored_object -from dashboard.common import utils -from dashboard.models import graph_data - - -class BadRequestError(Exception): - pass - - -class TimeseriesHandler(api_request_handler.ApiRequestHandler): - """API handler for getting timeseries data.""" - - def _CheckUser(self): - self._CheckIsLoggedIn() - - def Post(self, *args): - """Returns timeseries data in response to API requests. - - Argument: - test_path: Full path of test timeseries - - Outputs: - JSON timeseries data for the test_path, see README.md. 
- """ - try: - days = int(self.request.get('num_days', 30)) - except ValueError: - raise api_request_handler.BadRequestError( - 'Invalid num_days parameter %s' % self.request.get('num_days')) - if days <= 0: - raise api_request_handler.BadRequestError( - 'num_days cannot be negative (%s)' % days) - before = datetime.datetime.now() - datetime.timedelta(days=days) - - test_path = args[0] - test_key = utils.TestKey(test_path) - test = test_key.get() - if not test: - raise api_request_handler.BadRequestError('Invalid test_path %s' % - test_path) - - assert (datastore_hooks.IsUnalteredQueryPermitted() - or not test.internal_only) - datastore_hooks.SetSinglePrivilegedRequest() - - q = graph_data.Row.query() - q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) - q = q.filter(graph_data.Row.timestamp > before) - - rows = q.fetch() - if not rows: - return [] - revisions = [rev for rev in rows[0].to_dict() if rev.startswith('r_')] - header = ['revision', 'value', 'timestamp'] + revisions - timeseries = [header] - for row in sorted(rows, key=lambda r: r.revision): - timeseries.append([self._GetValue(row, a) for a in header]) - - return { - 'timeseries': timeseries, - 'test_path': test_path, - 'revision_logs': namespaced_stored_object.Get('revision_info'), - 'improvement_direction': test.improvement_direction, - } - - def _GetValue(self, row, attr): - value = getattr(row, attr, None) - if attr == 'timestamp': - return value.isoformat() - return value diff --git a/dashboard/dashboard/api/timeseries2.py b/dashboard/dashboard/api/timeseries2.py index 2db7dd0ff9b..ce8125f145c 100644 --- a/dashboard/dashboard/api/timeseries2.py +++ b/dashboard/dashboard/api/timeseries2.py @@ -6,6 +6,8 @@ from __future__ import division from __future__ import absolute_import +import six + from google.appengine.ext import ndb from dashboard import alerts @@ -27,38 +29,40 @@ COLUMNS_REQUIRING_ROWS = {'timestamp', 'revisions', 'annotations'}.union(descriptor.STATISTICS) - -class Timeseries2Handler(api_request_handler.ApiRequestHandler): - - def _CheckUser(self): - pass - - def Post(self): - desc = descriptor.Descriptor( - test_suite=self.request.get('test_suite'), - measurement=self.request.get('measurement'), - bot=self.request.get('bot'), - test_case=self.request.get('test_case'), - statistic=self.request.get('statistic', None), - build_type=self.request.get('build_type')) - min_revision = self.request.get('min_revision') - min_revision = int(min_revision) if min_revision else None - max_revision = self.request.get('max_revision') - max_revision = int(max_revision) if max_revision else None - query = TimeseriesQuery( - desc, - self.request.get('columns').split(','), min_revision, max_revision, - api_utils.ParseISO8601(self.request.get('min_timestamp', None)), - api_utils.ParseISO8601(self.request.get('max_timestamp', None))) - try: - result = query.FetchSync() - except AssertionError: - # The caller has requested internal-only data but is not authorized. 
- raise api_request_handler.NotFoundError - return result - - -class TimeseriesQuery(object): +from flask import request + + +def _CheckUser(): + pass + + +@api_request_handler.RequestHandlerDecoratorFactory(_CheckUser) +def TimeSeries2Post(): + desc = descriptor.Descriptor( + test_suite=request.values.get('test_suite'), + measurement=request.values.get('measurement'), + bot=request.values.get('bot'), + test_case=request.values.get('test_case'), + statistic=request.values.get('statistic', None), + build_type=request.values.get('build_type')) + min_revision = request.values.get('min_revision') + min_revision = int(min_revision) if min_revision else None + max_revision = request.values.get('max_revision') + max_revision = int(max_revision) if max_revision else None + query = TimeseriesQuery( + desc, + request.values.get('columns').split(','), min_revision, max_revision, + api_utils.ParseISO8601(request.values.get('min_timestamp', None)), + api_utils.ParseISO8601(request.values.get('max_timestamp', None))) + try: + result = query.FetchSync() + except AssertionError as e: + # The caller has requested internal-only data but is not authorized. + six.raise_from(api_request_handler.NotFoundError, e) + return result + + +class TimeseriesQuery: def __init__(self, desc, @@ -302,7 +306,7 @@ def _FilterRowQuery(self, query): query = query.filter(graph_data.Row.revision >= self._min_revision) if self._max_revision: query = query.filter(graph_data.Row.revision <= self._max_revision) - query = query.order(-graph_data.Row.revision) + query = query.order(-graph_data.Row.revision) # pylint: disable=invalid-unary-operand-type elif self._min_timestamp or self._max_timestamp: if self._min_timestamp: query = query.filter(graph_data.Row.timestamp >= self._min_timestamp) @@ -310,7 +314,7 @@ def _FilterRowQuery(self, query): query = query.filter(graph_data.Row.timestamp <= self._max_timestamp) query = query.order(-graph_data.Row.timestamp) else: - query = query.order(-graph_data.Row.revision) + query = query.order(-graph_data.Row.revision) # pylint: disable=invalid-unary-operand-type return query @ndb.tasklet diff --git a/dashboard/dashboard/api/timeseries2_test.py b/dashboard/dashboard/api/timeseries2_test.py index 2c452e200d5..5eed1011380 100644 --- a/dashboard/dashboard/api/timeseries2_test.py +++ b/dashboard/dashboard/api/timeseries2_test.py @@ -7,6 +7,7 @@ from __future__ import absolute_import import datetime +from flask import Flask import json import unittest import uuid @@ -31,12 +32,19 @@ 'unit': 'count' } +flask_app = Flask(__name__) + + +@flask_app.route('/api/timeseries2', methods=['POST']) +def TimeSeries2Post(): + return timeseries2.TimeSeries2Post() + class Timeseries2Test(testing_common.TestCase): def setUp(self): - super(Timeseries2Test, self).setUp() - self.SetUpApp([('/api/timeseries2', timeseries2.Timeseries2Handler)]) + super().setUp() + self.SetUpFlaskApp(flask_app) self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) self.SetCurrentUserOAuth(None) diff --git a/dashboard/dashboard/api/timeseries_test.py b/dashboard/dashboard/api/timeseries_test.py deleted file mode 100644 index 35de670ff0a..00000000000 --- a/dashboard/dashboard/api/timeseries_test.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import datetime -import unittest - -from dashboard.api import api_auth -from dashboard.api import timeseries -from dashboard.common import testing_common -from dashboard.common import utils -from dashboard.models import anomaly - - -class TimeseriesTest(testing_common.TestCase): - - def setUp(self): - super(TimeseriesTest, self).setUp() - self.SetUpApp([(r'/api/timeseries/(.*)', timeseries.TimeseriesHandler)]) - self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0]) - - def _AddData(self): - """Adds sample TestMetadata entities and returns their keys.""" - testing_common.AddTests(['ChromiumPerf'], ['linux'], - {'page_cycler': { - 'warm': { - 'cnn': {}, - } - }}) - test_path = 'ChromiumPerf/linux/page_cycler/warm/cnn' - test = utils.TestKey(test_path).get() - test.improvement_direction = anomaly.UP - test.put() - - now = datetime.datetime.now() - last_week = now - datetime.timedelta(days=7) - rows = dict([(i * 100, { - 'value': i * 1000, - 'a_whatever': 'blah', - 'r_v8': '1234a', - 'timestamp': now if i > 5 else last_week, - 'error': 3.3232 - }) for i in range(1, 10)]) - rows[100]['r_not_every_row'] = 12345 - testing_common.AddRows('ChromiumPerf/linux/page_cycler/warm/cnn', rows) - - def testPost_TestPath_ReturnsInternalData(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self._AddData() - test = utils.TestKey('ChromiumPerf/linux/page_cycler/warm/cnn').get() - test.internal_only = True - test.put() - - response = self.Post( - '/api/timeseries/ChromiumPerf/linux/page_cycler/warm/cnn') - data = self.GetJsonValue(response, 'timeseries') - self.assertEquals(10, len(data)) - self.assertEquals( - ['revision', 'value', 'timestamp', 'r_not_every_row', 'r_v8'], data[0]) - self.assertEquals(100, data[1][0]) - self.assertEquals(900, data[9][0]) - self.assertEquals('1234a', data[1][4]) - - improvement_direction = self.GetJsonValue(response, 'improvement_direction') - self.assertEquals(improvement_direction, anomaly.UP) - - def testPost_NumDays_ChecksTimestamp(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - self._AddData() - - response = self.Post( - '/api/timeseries/ChromiumPerf/linux/page_cycler/warm/cnn', - {'num_days': 1}) - data = self.GetJsonValue(response, 'timeseries') - self.assertEquals(5, len(data)) - self.assertEquals(['revision', 'value', 'timestamp', 'r_v8'], data[0]) - self.assertEquals(600, data[1][0]) - self.assertEquals(900, data[4][0]) - self.assertEquals('1234a', data[1][3]) - - def testPost_NumDaysNotNumber_400Response(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self.Post( - '/api/timeseries/ChromiumPerf/linux/page_cycler/warm/cnn', - {'num_days': 'foo'}, - status=400) - self.assertIn('Invalid num_days parameter', response.body) - - def testPost_NegativeNumDays_400Response(self): - self.SetCurrentUserOAuth(testing_common.INTERNAL_USER) - response = self.Post( - '/api/timeseries/ChromiumPerf/linux/page_cycler/warm/cnn', - {'num_days': -1}, - status=400) - self.assertIn('num_days cannot be negative', response.body) - - def testPost_ExternalUserInternalData_500Error(self): - self.SetCurrentUserOAuth(testing_common.EXTERNAL_USER) - self._AddData() - test = utils.TestKey('ChromiumPerf/linux/page_cycler/warm/cnn').get() - test.internal_only = True - test.put() - - self.Post( - '/api/timeseries/ChromiumPerf/linux/page_cycler/warm/cnn', status=500) - - -if __name__ == '__main__': - 
unittest.main() diff --git a/dashboard/dashboard/api/utils.py b/dashboard/dashboard/api/utils.py index c5a8986137c..03b9883cbd8 100644 --- a/dashboard/dashboard/api/utils.py +++ b/dashboard/dashboard/api/utils.py @@ -16,10 +16,9 @@ def ParseBool(value): value_lower = value.lower() if value_lower in ('true', '1'): return True - elif value_lower in ('false', '0'): + if value_lower in ('false', '0'): return False - else: - raise ValueError(value) + raise ValueError(value) def ParseISO8601(s): diff --git a/dashboard/dashboard/associate_alerts.py b/dashboard/dashboard/associate_alerts.py index 8795ca95e7a..c8f03fded0e 100644 --- a/dashboard/dashboard/associate_alerts.py +++ b/dashboard/dashboard/associate_alerts.py @@ -6,185 +6,174 @@ from __future__ import division from __future__ import absolute_import +from flask import request import re -from google.appengine.api import users from google.appengine.ext import ndb -from dashboard import oauth2_decorator from dashboard.common import request_handler from dashboard.common import utils from dashboard.models import anomaly -from dashboard.services import issue_tracker_service - - -class AssociateAlertsHandler(request_handler.RequestHandler): - """Associates alerts with a bug.""" - - def post(self): - """POST is the same as GET for this endpoint.""" - self.get() - - @oauth2_decorator.DECORATOR.oauth_required - def get(self): - """Response handler for the page used to group an alert with a bug. - - Request parameters: - bug_id: Bug ID number, as a string (when submitting the form). - project_id: Monorail project ID (when submitting the form). - keys: Comma-separated alert keys in urlsafe format. - confirm: If non-empty, associate alerts with a bug ID even if - it appears that the alerts already associated with that bug - have a non-overlapping revision range. - - Outputs: - HTML with result. - """ - if not utils.IsValidSheriffUser(): - user = users.get_current_user() - self.ReportError('User "%s" not authorized.' % user, status=403) - return - - urlsafe_keys = self.request.get('keys') - if not urlsafe_keys: - self.RenderHtml('bug_result.html', - {'error': 'No alerts specified to add bugs to.'}) - return - - is_confirmed = bool(self.request.get('confirm')) - bug_id = self.request.get('bug_id') - if bug_id: - project_id = self.request.get('project_id', 'chromium') - self._AssociateAlertsWithBug(bug_id, project_id, urlsafe_keys, +from dashboard.services import perf_issue_service_client + + +def AssociateAlertsHandlerPost(): + """Response handler for the page used to group an alert with a bug. + + Request parameters: + bug_id: Bug ID number, as a string (when submitting the form). + project_id: Monorail project ID (when submitting the form). + keys: Comma-separated alert keys in urlsafe format. + confirm: If non-empty, associate alerts with a bug ID even if + it appears that the alerts already associated with that bug + have a non-overlapping revision range. + + Outputs: + HTML with result. + """ + if not utils.IsValidSheriffUser(): + user = utils.GetGaeCurrentUser() + return request_handler.RequestHandlerReportError( + 'User "%s" not authorized.' 
% user, status=403) + + urlsafe_keys = request.values.get('keys') + if not urlsafe_keys: + return request_handler.RequestHandlerRenderHtml( + 'bug_result.html', {'error': 'No alerts specified to add bugs to.'}) + + is_confirmed = bool(request.values.get('confirm')) + bug_id = request.values.get('bug_id') + if bug_id: + project_id = request.values.get('project_id', 'chromium') + return _AssociateAlertsWithBug(bug_id, project_id, urlsafe_keys, is_confirmed) - else: - self._ShowCommentDialog(urlsafe_keys) - - def _ShowCommentDialog(self, urlsafe_keys): - """Sends a HTML page with a form for selecting a bug number. - - Args: - urlsafe_keys: Comma-separated Alert keys in urlsafe format. - """ - # Get information about Alert entities and related TestMetadata entities, - # so that they can be compared with recent bugs. - alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] - alert_entities = ndb.get_multi(alert_keys) - ranges = [(a.start_revision, a.end_revision) for a in alert_entities] - - # Mark bugs that have overlapping revision ranges as potentially relevant. - # On the alerts page, alerts are only highlighted if the revision range - # overlaps with the revision ranges for all of the selected alerts; the - # same thing is done here. - bugs = self._FetchBugs() - for bug in bugs: - this_range = _RevisionRangeFromSummary(bug['summary']) - bug['relevant'] = all(_RangesOverlap(this_range, r) for r in ranges) - - self.RenderHtml( - 'bug_result.html', { - 'bug_associate_form': True, - 'keys': urlsafe_keys, - 'bugs': bugs, - 'projects': utils.MONORAIL_PROJECTS - }) - - def _FetchBugs(self): - http = oauth2_decorator.DECORATOR.http() - issue_tracker = issue_tracker_service.IssueTrackerService(http) - response = issue_tracker.List( - q='opened-after:today-5', - label='Type-Bug-Regression,Performance', - sort='-id') - return response.get('items', []) if response else [] - - def _AssociateAlertsWithBug(self, bug_id, project_id, urlsafe_keys, - is_confirmed): - """Sets the bug ID for a set of alerts. - - This is done after the user enters and submits a bug ID. - - Args: - bug_id: Bug ID number, as a string. - project_id: Monorial project ID. - urlsafe_keys: Comma-separated Alert keys in urlsafe format. - is_confirmed: Whether the user has confirmed that they really want - to associate the alerts with a bug even if it appears that the - revision ranges don't overlap. - """ - # Validate bug ID. - try: - bug_id = int(bug_id) - except ValueError: - self.RenderHtml('bug_result.html', - {'error': 'Invalid bug ID "%s".' % str(bug_id)}) - return - - # Get Anomaly entities and related TestMetadata entities. - alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] - alert_entities = ndb.get_multi(alert_keys) - - if not is_confirmed: - warning_msg = self._VerifyAnomaliesOverlap(alert_entities, bug_id, - project_id) - if warning_msg: - self._ShowConfirmDialog('associate_alerts', warning_msg, { - 'bug_id': bug_id, - 'project_id': project_id, - 'keys': urlsafe_keys, - }) - return - - AssociateAlerts(bug_id, project_id, alert_entities) - - self.RenderHtml('bug_result.html', { - 'bug_id': bug_id, - 'project_id': project_id, - }) - - def _VerifyAnomaliesOverlap(self, alerts, bug_id, project_id): - """Checks whether the alerts' revision ranges intersect. - - Args: - alerts: A list of Alert entities to verify. - bug_id: Bug ID number. - project_id: Monorail project ID. - - Returns: - A string with warning message, or None if there's no warning. 
- """ - if not utils.MinimumAlertRange(alerts): - return 'Selected alerts do not have overlapping revision range.' - else: - alerts_with_bug, _, _ = anomaly.Anomaly.QueryAsync( - bug_id=bug_id, project_id=project_id, limit=500).get_result() - - if not alerts_with_bug: - return None - if not utils.MinimumAlertRange(alerts_with_bug): - return ('Alerts in bug %s:%s do not have overlapping revision ' - 'range.' % (project_id, bug_id)) - elif not utils.MinimumAlertRange(alerts + alerts_with_bug): - return ('Selected alerts do not have overlapping revision ' - 'range with alerts in bug %s:%s.' % (project_id, bug_id)) + return _ShowCommentDialog(urlsafe_keys) + + +def _ShowCommentDialog(urlsafe_keys): + """Sends a HTML page with a form for selecting a bug number. + + Args: + urlsafe_keys: Comma-separated Alert keys in urlsafe format. + """ + # Get information about Alert entities and related TestMetadata entities, + # so that they can be compared with recent bugs. + alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] + alert_entities = ndb.get_multi(alert_keys) + ranges = [(a.start_revision, a.end_revision) for a in alert_entities] + + # Mark bugs that have overlapping revision ranges as potentially relevant. + # On the alerts page, alerts are only highlighted if the revision range + # overlaps with the revision ranges for all of the selected alerts; the + # same thing is done here. + bugs = _FetchBugs() + for bug in bugs: + this_range = _RevisionRangeFromSummary(bug['summary']) + bug['relevant'] = all(_RangesOverlap(this_range, r) for r in ranges) + + return request_handler.RequestHandlerRenderHtml( + 'bug_result.html', { + 'bug_associate_form': True, + 'keys': urlsafe_keys, + 'bugs': bugs, + 'projects': utils.MONORAIL_PROJECTS + }) + + +def _FetchBugs(): + response = perf_issue_service_client.GetIssues( + age=5, + status='all', + labels='Type-Bug-Regression,Performance', + ) + + return response + + +def _AssociateAlertsWithBug(bug_id, project_id, urlsafe_keys, is_confirmed): + """Sets the bug ID for a set of alerts. + + This is done after the user enters and submits a bug ID. + + Args: + bug_id: Bug ID number, as a string. + project_id: Monorial project ID. + urlsafe_keys: Comma-separated Alert keys in urlsafe format. + is_confirmed: Whether the user has confirmed that they really want + to associate the alerts with a bug even if it appears that the + revision ranges don't overlap. + """ + # Validate bug ID. + try: + bug_id = int(bug_id) + except ValueError: + return request_handler.RequestHandlerRenderHtml( + 'bug_result.html', {'error': 'Invalid bug ID "%s".' % str(bug_id)}) + + # Get Anomaly entities and related TestMetadata entities. + alert_keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] + alert_entities = ndb.get_multi(alert_keys) + + if not is_confirmed: + warning_msg = _VerifyAnomaliesOverlap(alert_entities, bug_id, project_id) + if warning_msg: + return _ShowConfirmDialog('associate_alerts', warning_msg, { + 'bug_id': bug_id, + 'project_id': project_id, + 'keys': urlsafe_keys, + }) + + AssociateAlerts(bug_id, project_id, alert_entities) + + return request_handler.RequestHandlerRenderHtml('bug_result.html', { + 'bug_id': bug_id, + 'project_id': project_id, + }) + + +def _VerifyAnomaliesOverlap(alerts, bug_id, project_id): + """Checks whether the alerts' revision ranges intersect. + + Args: + alerts: A list of Alert entities to verify. + bug_id: Bug ID number. + project_id: Monorail project ID. 
+ + Returns: + A string with warning message, or None if there's no warning. + """ + if not utils.MinimumAlertRange(alerts): + return 'Selected alerts do not have overlapping revision range.' + alerts_with_bug, _, _ = anomaly.Anomaly.QueryAsync( + bug_id=bug_id, project_id=project_id, limit=500).get_result() + + if not alerts_with_bug: return None + if not utils.MinimumAlertRange(alerts_with_bug): + return ('Alerts in bug %s:%s do not have overlapping revision ' + 'range.' % (project_id, bug_id)) + if not utils.MinimumAlertRange(alerts + alerts_with_bug): + return ('Selected alerts do not have overlapping revision ' + 'range with alerts in bug %s:%s.' % (project_id, bug_id)) + return None - def _ShowConfirmDialog(self, handler, message, parameters): - """Sends a HTML page with a form to confirm an action. - - Args: - handler: Name of URL handler to submit confirm dialog. - message: Confirmation message. - parameters: Dictionary of request parameters to submit with confirm - dialog. - """ - self.RenderHtml( - 'bug_result.html', { - 'confirmation_required': True, - 'handler': handler, - 'message': message, - 'parameters': parameters or {} - }) + +def _ShowConfirmDialog(handler, message, parameters): + """Sends a HTML page with a form to confirm an action. + + Args: + handler: Name of URL handler to submit confirm dialog. + message: Confirmation message. + parameters: Dictionary of request parameters to submit with confirm + dialog. + """ + return request_handler.RequestHandlerRenderHtml( + 'bug_result.html', { + 'confirmation_required': True, + 'handler': handler, + 'message': message, + 'parameters': parameters or {} + }) def AssociateAlerts(bug_id, project_id, alerts): diff --git a/dashboard/dashboard/associate_alerts_test.py b/dashboard/dashboard/associate_alerts_test.py index 4f701efabb2..9ed6034cd1e 100644 --- a/dashboard/dashboard/associate_alerts_test.py +++ b/dashboard/dashboard/associate_alerts_test.py @@ -6,31 +6,33 @@ from __future__ import division from __future__ import absolute_import -import unittest - +from flask import Flask import mock -import webapp2 +import six +import unittest import webtest -# pylint: disable=unused-import -from dashboard import mock_oauth2_decorator -# pylint: enable=unused-import - from dashboard import associate_alerts from dashboard.common import testing_common from dashboard.common import utils from dashboard.models import anomaly from dashboard.models.subscription import Subscription -from dashboard.services import issue_tracker_service +from dashboard.services import perf_issue_service_client + + +flask_app = Flask(__name__) + + +@flask_app.route('/associate_alerts', methods=['GET', 'POST']) +def AssociateAlertsHandlerPost(): + return associate_alerts.AssociateAlertsHandlerPost() class AssociateAlertsTest(testing_common.TestCase): def setUp(self): - super(AssociateAlertsTest, self).setUp() - app = webapp2.WSGIApplication([('/associate_alerts', - associate_alerts.AssociateAlertsHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetSheriffDomains(['chromium.org']) self.SetCurrentUser('foo@chromium.org', is_admin=True) @@ -67,7 +69,7 @@ def _AddAnomalies(self): subscriptions=[subscription], subscription_names=[subscription.name], ).put() - key_map[end_rev] = anomaly_key.urlsafe() + key_map[end_rev] = six.ensure_str(anomaly_key.urlsafe()) # Add an anomaly that overlaps. 
anomaly_key = anomaly.Anomaly( @@ -79,7 +81,7 @@ def _AddAnomalies(self): subscriptions=[subscription], subscription_names=[subscription.name], ).put() - key_map[9996] = anomaly_key.urlsafe() + key_map[9996] = six.ensure_str(anomaly_key.urlsafe()) # Add an anomaly that overlaps and has bug ID. anomaly_key = anomaly.Anomaly( @@ -92,12 +94,12 @@ def _AddAnomalies(self): subscriptions=[subscription], subscription_names=[subscription.name], ).put() - key_map[9997] = anomaly_key.urlsafe() + key_map[9997] = six.ensure_str(anomaly_key.urlsafe()) return key_map def testGet_NoKeys_ShowsError(self): response = self.testapp.get('/associate_alerts') - self.assertIn('
<div class="error">', response.body) + self.assertIn(b'<div class="error">
', response.body) def testGet_SameAsPost(self): get_response = self.testapp.get('/associate_alerts') @@ -108,46 +110,43 @@ def testGet_InvalidBugId_ShowsError(self): key_map = self._AddAnomalies() response = self.testapp.get('/associate_alerts?keys=%s&bug_id=foo' % key_map[9996]) - self.assertIn('
<div class="error">', response.body) - self.assertIn('Invalid bug ID', response.body) + self.assertIn(b'<div class="error">
', response.body) + self.assertIn(b'Invalid bug ID', response.body) # Mocks fetching bugs from issue tracker. - @mock.patch('services.issue_tracker_service.discovery.build', - mock.MagicMock()) + @mock.patch('dashboard.common.utils.ServiceAccountHttp', mock.MagicMock()) @mock.patch.object( - issue_tracker_service.IssueTrackerService, 'List', - mock.MagicMock( - return_value={ - 'items': [ - { - 'id': 12345, - 'summary': '5% regression in bot/suite/x at 10000:20000', - 'state': 'open', - 'status': 'New', - 'author': { - 'name': 'exam...@google.com' - }, - }, - { - 'id': 13579, - 'summary': '1% regression in bot/suite/y at 10000:20000', - 'state': 'closed', - 'status': 'WontFix', - 'author': { - 'name': 'exam...@google.com' - }, - }, - ] - })) + perf_issue_service_client, 'GetIssues', + mock.MagicMock(return_value=[ + { + 'id': 12345, + 'summary': '5% regression in bot/suite/x at 10000:20000', + 'state': 'open', + 'status': 'New', + 'author': { + 'name': 'exam...@google.com' + }, + }, + { + 'id': 13579, + 'summary': '1% regression in bot/suite/y at 10000:20000', + 'state': 'closed', + 'status': 'WontFix', + 'author': { + 'name': 'exam...@google.com' + }, + }, + ])) def testGet_NoBugId_ShowsDialog(self): # When a GET request is made with some anomaly keys but no bug ID, # A HTML form is shown for the user to input a bug number. key_map = self._AddAnomalies() response = self.testapp.get('/associate_alerts?keys=%s' % key_map[10000]) + # The response contains a table of recent bugs and a form. - self.assertIn('12345', response.body) - self.assertIn('13579', response.body) - self.assertIn(' a2.percent_changed: return -1 - elif a1.percent_changed < a2.percent_changed: + if a1.percent_changed < a2.percent_changed: return 1 return 0 diff --git a/dashboard/dashboard/auto_bisect_test.py b/dashboard/dashboard/auto_bisect_test.py index 8ddb822cbfd..607e147ec8c 100644 --- a/dashboard/dashboard/auto_bisect_test.py +++ b/dashboard/dashboard/auto_bisect_test.py @@ -21,7 +21,7 @@ class StartNewBisectForBugTest(testing_common.TestCase): def setUp(self): - super(StartNewBisectForBugTest, self).setUp() + super().setUp() self.SetCurrentUser('internal@chromium.org') namespaced_stored_object.Set('bot_configurations', { 'linux-pinpoint': {}, @@ -99,7 +99,7 @@ def testStartNewBisectForBug_Pinpoint_Succeeds(self, mock_new): 'issue_url': 'http://pinpoint/123' }, result) self.assertEqual('123', a.get().pinpoint_bisects[0]) - self.assertEqual({ + self.assertCountEqual({ 'alert': a.urlsafe(), 'test_path': test_key.id() }, json.loads(mock_new.call_args[0][0]['tags'])) diff --git a/dashboard/dashboard/bug_details.py b/dashboard/dashboard/bug_details.py deleted file mode 100644 index 61d33c352f5..00000000000 --- a/dashboard/dashboard/bug_details.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
-"""Provides an endpoint for getting details about a sheriffed bug.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json -import re - -from dashboard import oauth2_decorator -from dashboard.common import request_handler -from dashboard.services import issue_tracker_service - -BUGDROID = 'bugdroid1@chromium.org' -REVIEW_RE = r'(Review-Url|Reviewed-on): (https?:\/\/[\/\.\w\d]+)' - - -class BugDetailsHandler(request_handler.RequestHandler): - """Gets details about a sheriffed bug.""" - - def post(self): - """POST is the same as GET for this endpoint.""" - self.get() - - @oauth2_decorator.DECORATOR.oauth_required - def get(self): - """Response handler to get details about a specific bug. - - Request parameters: - bug_id: Bug ID number, as a string - """ - bug_id = int(self.request.get('bug_id'), 0) - if bug_id <= 0: - self.ReportError('Invalid or no bug id specified.') - return - - http = oauth2_decorator.DECORATOR.http() - self.response.out.write(json.dumps(GetBugDetails(bug_id, http))) - - -def GetBugDetails(bug_id, http): - bug_details = _GetDetailsFromMonorail(bug_id, http) - bug_details['review_urls'] = _GetLinkedRevisions(bug_details['comments']) - bug_details['bisects'] = [] - return bug_details - - -def _GetDetailsFromMonorail(bug_id, http): - issue_tracker = issue_tracker_service.IssueTrackerService(http) - bug_details = issue_tracker.GetIssue(bug_id) - if not bug_details: - return {'error': 'Failed to get bug details from monorail API'} - bug_details['comments'] = issue_tracker.GetIssueComments(bug_id) - owner = None - if bug_details.get('owner'): - owner = bug_details.get('owner').get('name') - return { - 'comments': bug_details['comments'], - 'owner': owner, - 'published': bug_details['published'], - 'state': bug_details['state'], - 'status': bug_details['status'], - 'summary': bug_details['summary'], - } - - -def _GetLinkedRevisions(comments): - """Parses the comments for commits linked by bugdroid.""" - review_urls = [] - bugdroid_comments = [c for c in comments if c['author'] == BUGDROID] - for comment in bugdroid_comments: - m = re.search(REVIEW_RE, comment['content']) - if m: - review_urls.append(m.group(2)) - return review_urls diff --git a/dashboard/dashboard/bug_details_test.py b/dashboard/dashboard/bug_details_test.py deleted file mode 100644 index 2b2cf7c15a1..00000000000 --- a/dashboard/dashboard/bug_details_test.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import sys -import unittest - -import mock -import webapp2 -import webtest - -# pylint: disable=unused-import -from dashboard import mock_oauth2_decorator -# pylint: enable=unused-import - -from dashboard import bug_details -from dashboard.common import testing_common -from dashboard.services import issue_tracker_service - -GET_ISSUE_DATA = { - 'owner': { - 'name': 'sullivan@chromium.org' - }, - 'state': 'open', - 'status': 'Untriaged', - 'summary': 'Regression in sunspider', - 'published': '2017-02-17T23:08:44', -} - -GET_COMMENTS_DATA = [{ - 'author': 'foo@chromium.org', - 'content': 'This is the first comment', - 'published': '2017-02-17T09:59:55', -}, { - 'author': 'bar@chromium.org', - 'content': 'This is the second comment', - 'published': '2017-02-17T10:00:0', -}, { - 'author': 'bugdroid1@chromium.org', - 'content': 'The following revision refers to this bug:\n' - ' https://chromium.googlesource.com/chromium/src.git/+/' - '9ac6e6466cc0df7e1a3ad4488c5c8bdc2db4da36\n\n' - 'Review-Url: https://codereview.chromium.org/2707483002\n\n', - 'published': '2017-02-17T23:08:44', -}] - - -class BugDetailsHandlerTest(testing_common.TestCase): - - def setUp(self): - super(BugDetailsHandlerTest, self).setUp() - app = webapp2.WSGIApplication([('/bug_details', - bug_details.BugDetailsHandler)]) - self.testapp = webtest.TestApp(app) - - # Mocks fetching bugs from issue tracker. - @unittest.skipIf(sys.platform.startswith('linux'), 'oauth2 mock error') - @mock.patch('services.issue_tracker_service.discovery.build', - mock.MagicMock()) - @mock.patch.object(issue_tracker_service.IssueTrackerService, 'GetIssue', - mock.MagicMock(return_value=GET_ISSUE_DATA)) - @mock.patch.object(issue_tracker_service.IssueTrackerService, - 'GetIssueComments', - mock.MagicMock(return_value=GET_COMMENTS_DATA)) - def testPost(self): - response = self.testapp.post('/bug_details', {'bug_id': '12345'}) - self.assertEqual('Regression in sunspider', - self.GetJsonValue(response, 'summary')) - self.assertEqual('sullivan@chromium.org', - self.GetJsonValue(response, 'owner')) - self.assertEqual('2017-02-17T23:08:44', - self.GetJsonValue(response, 'published')) - self.assertEqual('open', self.GetJsonValue(response, 'state')) - self.assertEqual('Untriaged', self.GetJsonValue(response, 'status')) - comments = self.GetJsonValue(response, 'comments') - self.assertEqual(3, len(comments)) - self.assertEqual('This is the second comment', comments[1]['content']) - self.assertItemsEqual(['https://codereview.chromium.org/2707483002'], - self.GetJsonValue(response, 'review_urls')) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/buildbucket_job.py b/dashboard/dashboard/buildbucket_job.py index 7bbb6ec069b..c7aa7d1d44a 100644 --- a/dashboard/dashboard/buildbucket_job.py +++ b/dashboard/dashboard/buildbucket_job.py @@ -9,7 +9,7 @@ import re -class BisectJob(object): +class BisectJob: """A buildbot bisect job started and monitored through buildbucket.""" def __init__(self, diff --git a/dashboard/dashboard/buildbucket_job_status.py b/dashboard/dashboard/buildbucket_job_status.py index 92c6ed8b97d..91256b028ea 100644 --- a/dashboard/dashboard/buildbucket_job_status.py +++ b/dashboard/dashboard/buildbucket_job_status.py @@ -11,35 +11,32 @@ from dashboard.common import request_handler from dashboard.services import buildbucket_service +from dashboard.services import request -class 
BuildbucketJobStatusHandler(request_handler.RequestHandler): - """Handler for requests of the form /buildbucket_job_status/01234567... - - This displays information regarding the status of the buildbucket job in a - human-readable format. - """ - - def get(self, job_id): +def BuildbucketJobStatusGet(job_id): + error, error_code = False, None + try: original_status = buildbucket_service.GetJobStatus(job_id) - - error = 'error' in original_status - - if error: - error_reason = original_status['error'].get('reason') - status_text = json.dumps(original_status, sort_keys=True, indent=4) - else: - clean_status = _ConvertTimes(_ParseJsonKeys(original_status.get('build'))) - status_text = json.dumps(clean_status, sort_keys=True, indent=4) - - self.RenderHtml( - 'buildbucket_job_status.html', { - 'job_id': job_id, - 'status_text': 'DATA:' + status_text, - 'build': None if error else clean_status, - 'error': error_reason if error else None, - 'original_response': original_status, - }) + # The _ParseJsonKeys and _ConvertTimes should be no longer needed in + # buildbucket V2 as no fields in the current proto has those suffixes. + status_text = json.dumps(original_status, sort_keys=True, indent=4) + except (request.NotFoundError, request.RequestError) as e: + error = True + original_status = e.content + status_text = original_status + error_code = e.headers.get('x-prpc-grpc-code', None) + + # In V2, the error_reason (e.g., BUILD_NOT_FOUND) is no longer part of the + # response. We have a numeric value 'x-prpc-grpc-code' in the header. + return request_handler.RequestHandlerRenderHtml( + 'buildbucket_job_status.html', { + 'job_id': job_id, + 'status_text': 'DATA:' + status_text, + 'build': None if error else original_status, + 'error': ('gRPC code: %s' % error_code) if error else None, + 'original_response': original_status, + }) def _ConvertTimes(dictionary): diff --git a/dashboard/dashboard/buildbucket_job_status_test.py b/dashboard/dashboard/buildbucket_job_status_test.py index edd285dba7a..a42a657f92b 100644 --- a/dashboard/dashboard/buildbucket_job_status_test.py +++ b/dashboard/dashboard/buildbucket_job_status_test.py @@ -6,19 +6,21 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask, Response import json -import re import unittest import mock -import webapp2 import webtest +from dashboard import common +from dashboard import services from dashboard import buildbucket_job_status from dashboard.common import testing_common +from dashboard.services import request SAMPLE_RESPONSE = r"""{ - "build": { + "build": { "status": "COMPLETED", "created_ts": "1430771172999340", "url": "http://build.chromium.org/p/tryserver.chromium.perf/builders\ @@ -86,54 +88,74 @@ \"test_type\": \"perf\"}}}", "completed_ts": "1430771433288680", "updated_ts": "1430771433288850" - }, - "kind": "buildbucket#resourcesItem", - "etag": "\"mWAxLWqIHM8gXvavjiTVUApk92U/AaU08KGmhFQcdRWOCVgNYJBBlgI\"" + }, + "kind": "buildbucket#resourcesItem", + "etag": "\"mWAxLWqIHM8gXvavjiTVUApk92U/AaU08KGmhFQcdRWOCVgNYJBBlgI\"" }""".replace('\\\n', '') SAMPLE_RESPONSE_NOT_FOUND = r"""{ - "error": { + "error": { "message": "", "reason": "BUILD_NOT_FOUND" - }, - "kind": "buildbucket#resourcesItem", - "etag": "\"mWAxLWqIHM8gXvavjiTVUApk92U/vcsTyxWNZoEnszG8qWqlQLOhpl8\"" + }, + "kind": "buildbucket#resourcesItem", + "etag": "\"mWAxLWqIHM8gXvavjiTVUApk92U/vcsTyxWNZoEnszG8qWqlQLOhpl8\"" }""" +flask_app = Flask(__name__) + + +@flask_app.route('/buildbucket_job_status/') +def BuildbucketJobStatusGet(job_id): 
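+ # Thin Flask route defined only for this test; it delegates to the module-level handler under test, following the same flask_app wrapper pattern used in the other migrated *_test.py files above.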
+ return buildbucket_job_status.BuildbucketJobStatusGet(job_id) + class BuildbucketJobStatusTest(testing_common.TestCase): def setUp(self): - super(BuildbucketJobStatusTest, self).setUp() - app = webapp2.WSGIApplication([ - (r'/buildbucket_job_status/(\d+)', - buildbucket_job_status.BuildbucketJobStatusHandler) - ]) - self.testapp = webtest.TestApp(app) - - @mock.patch.object(buildbucket_job_status.buildbucket_service, 'GetJobStatus', - mock.MagicMock(return_value=json.loads(SAMPLE_RESPONSE))) - def testGet_ExistingJob(self): - response = self.testapp.get('/buildbucket_job_status/9046721402459257808') - # Verify that a human-readable creation time is presented. We check for the - # minute:second string to avoid localization from breaking this test. - self.assertIn('26:12', response.body) - # Verify that both the good and bad revisions are displayed somewhere. - self.assertIn('328115', response.body) - self.assertIn('328111', response.body) - # Verify that a link to buildbot is provided somewhere. - self.assertTrue( - re.search('href\\s*=\\s*[\'"]http://build.chromium.org/p/tryserver', - response.body, re.IGNORECASE)) + super().setUp() + self.testapp = webtest.TestApp(flask_app) @mock.patch.object( - buildbucket_job_status.buildbucket_service, 'GetJobStatus', - mock.MagicMock(return_value=json.loads(SAMPLE_RESPONSE_NOT_FOUND))) + services.buildbucket_service, 'GetJobStatus', + mock.MagicMock(return_value=json.loads(r"""{"status": "SUCCESS"}"""))) + def testGet_ExistingJob(self): + with mock.patch.object( + common.request_handler, + 'RequestHandlerRenderHtml', + return_value=Response()) as render: + self.testapp.get('/buildbucket_job_status/12345') + render.assert_called_once_with( + 'buildbucket_job_status.html', { + 'job_id': '12345', + 'status_text': 'DATA:{\n "status": "SUCCESS"\n}', + 'build': { + "status": "SUCCESS" + }, + 'error': None, + 'original_response': { + "status": "SUCCESS" + } + }) + + @mock.patch.object(services.buildbucket_service, 'GetJobStatus', + mock.MagicMock( + side_effect=request.NotFoundError( + 'oops', {'x-prpc-grpc-code': '5'}, 'Error msg.'))) def testGet_JobNotFound(self): - response = self.testapp.get('/buildbucket_job_status/9046721402459257808') - # If the error code is shown somewhere in the page and no exception is - # raised, that's good enough. - self.assertIn('BUILD_NOT_FOUND', response) + with mock.patch.object( + common.request_handler, + 'RequestHandlerRenderHtml', + return_value=Response()) as render: + self.testapp.get('/buildbucket_job_status/12345') + render.assert_called_once_with( + 'buildbucket_job_status.html', { + 'job_id': '12345', + 'status_text': 'DATA:Error msg.', + 'build': None, + 'error': 'gRPC code: 5', + 'original_response': 'Error msg.' 
+ }) if __name__ == '__main__': diff --git a/dashboard/dashboard/buildbucket_job_test.py b/dashboard/dashboard/buildbucket_job_test.py index 7b3e51f0ec4..9403faedac0 100644 --- a/dashboard/dashboard/buildbucket_job_test.py +++ b/dashboard/dashboard/buildbucket_job_test.py @@ -15,7 +15,7 @@ class BuildbucketJobTest(testing_common.TestCase): def setUp(self): - super(BuildbucketJobTest, self).setUp() + super().setUp() self._args_base = { 'try_job_id': 1, 'recipe_tester_name': 'linux_perf_bisect', diff --git a/dashboard/dashboard/can_bisect_test.py b/dashboard/dashboard/can_bisect_test.py index eed826c0cc8..7fcbaf683bc 100644 --- a/dashboard/dashboard/can_bisect_test.py +++ b/dashboard/dashboard/can_bisect_test.py @@ -16,7 +16,7 @@ class CanBisectTest(testing_common.TestCase): def setUp(self): - super(CanBisectTest, self).setUp() + super().setUp() namespaced_stored_object.Set( can_bisect.BISECT_BOT_MAP_KEY, {'SupportedDomain': ['perf_bot', 'bisect_bot']}) diff --git a/dashboard/dashboard/chart_handler.py b/dashboard/dashboard/chart_handler.py index babf35a9e05..9bda0fcab01 100644 --- a/dashboard/dashboard/chart_handler.py +++ b/dashboard/dashboard/chart_handler.py @@ -12,20 +12,21 @@ from dashboard import revision_info_client -class ChartHandler(request_handler.RequestHandler): - """Base class for requests which display a chart.""" - - def RenderHtml(self, template_file, template_values, status=200): - """Fills in template values for pages that show charts.""" - template_values.update(self._GetChartValues()) - template_values['revision_info'] = json.dumps( - template_values['revision_info']) - return super(ChartHandler, self).RenderHtml(template_file, template_values, - status) - - def GetDynamicVariables(self, template_values, request_path=None): - template_values.update(self._GetChartValues()) - super(ChartHandler, self).GetDynamicVariables(template_values, request_path) - - def _GetChartValues(self): - return {'revision_info': revision_info_client.GetRevisionInfoConfig()} +def RenderHtml(template_file, template_values, status=200): + """Fills in template values for pages that show charts.""" + template_values.update(_GetChartValues()) + template_values['revision_info'] = json.dumps( + template_values['revision_info']) + return request_handler.RequestHandlerRenderHtml(template_file, + template_values, status) + + +def GetDynamicVariables(template_values, request_path=None): + template_values['revision_info'] = \ + revision_info_client.GetRevisionInfoConfig() + request_handler.RequestHandlerGetDynamicVariables(template_values, + request_path) + + +def _GetChartValues(): + return {'revision_info': revision_info_client.GetRevisionInfoConfig()} diff --git a/dashboard/dashboard/common/bot_configurations.py b/dashboard/dashboard/common/bot_configurations.py index 7955ad5fa5b..8718ec95217 100644 --- a/dashboard/dashboard/common/bot_configurations.py +++ b/dashboard/dashboard/common/bot_configurations.py @@ -6,8 +6,6 @@ from __future__ import division from __future__ import absolute_import -import string - from google.appengine.ext import ndb from dashboard.common import namespaced_stored_object @@ -43,7 +41,9 @@ def GetAliasesAsync(bot): def List(): bot_configurations = namespaced_stored_object.Get(BOT_CONFIGURATIONS_KEY) + if not bot_configurations: + return [] canonical_names = [ name for name, value in bot_configurations.items() if 'alias' not in value ] - return sorted(canonical_names, key=string.lower) + return sorted(canonical_names, key=str.lower) diff --git 
a/dashboard/dashboard/common/bot_configurations_test.py b/dashboard/dashboard/common/bot_configurations_test.py index 17cb35e8721..79df578e931 100644 --- a/dashboard/dashboard/common/bot_configurations_test.py +++ b/dashboard/dashboard/common/bot_configurations_test.py @@ -14,7 +14,7 @@ class ConfigTest(testing_common.TestCase): def setUp(self): - super(ConfigTest, self).setUp() + super().setUp() namespaced_stored_object.Set( bot_configurations.BOT_CONFIGURATIONS_KEY, { diff --git a/dashboard/dashboard/common/cloud_metric.py b/dashboard/dashboard/common/cloud_metric.py new file mode 100644 index 00000000000..46852df8ffb --- /dev/null +++ b/dashboard/dashboard/common/cloud_metric.py @@ -0,0 +1,383 @@ +# Copyright 2023 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from __future__ import absolute_import + +import logging +import time +import uuid +from google.appengine.api import app_identity +from google.cloud import monitoring_v3 + +METRIC_TYPE_PREFIX = "custom.googleapis.com/" +RESOURCE_TYPE = "generic_task" +LOCATION = "us-central1" +NAMESPACE = "Prod" +DEFAULT_TASK_ID = "task_id" +JOB_ID = "job_id" +JOB_TYPE = "job_type" +JOB_STATUS = "job_status" +API_METRIC_TYPE = "api/metrics" +API_NAME = "api_name" +REQUEST_STATUS = "request_status" +RUN_TIME = "run_time" +USER = "user" +ORIGIN = "origin" +JOB_TYPE_BY_NAME = "job_type_by_name" +UUID = "uuid" +BOT_NAME = "bot_name" +BENCHMARK = "benchmark" +STORY = "story" + +# swarming metric label keys +SWARMING_TASK_ID = 'swarming_task_id' +SWARMING_BOT_ID = 'swarming_bot_id' +SWARMING_BOT_OS = 'swarming_bot_os' +SWARMING_TASK_PENDING_TIME = 'swarming_task_pending_time' +SWARMING_TASK_RUNNING_TIME = 'swarming_task_running_time' + +#Auto Triaged Issue Status +AUTO_TRIAGE_CREATED = 'issue_created' +AUTO_TRIAGE_BISECTED = 'issue_bisected' +AUTO_TRIAGE_CULPRIT_FOUND = 'culprit_found' + + +def PublishAutoTriagedIssue(status): + label_dict = {'status': status} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + metric_type='chromeperf/alerts/auto_triaged', + label_dict=label_dict) + + +def PublishPerfIssueServiceGroupingImpariry(endpoint): + label_dict = {'endpoint': endpoint} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + metric_type='chromeperf/perf_issue_service/grouping_imparity', + label_dict=label_dict) + + +def PublishPerfIssueInvalidComponentCount(component_count): + label_dict = {'component_count': str(component_count)} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + metric_type='chromeperf/perf_issue_service/invalidcomponentcount', + label_dict=label_dict) + + +def PublishPerfIssueServiceRequests(request, method, url, data): + data = { + k: (v if type(v) in (bytes, str) else str(v)) for k, v in data.items() + } + label_dict = {'request': request, 'method': method, 'url': url, **data} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + metric_type='chromeperf/perf_issue_service/request', + label_dict=label_dict) + + +def PublishPerfIssueServiceRequestFailures(request, method, url, data): + data = { + k: (v if type(v) in (bytes, str) else str(v)) for k, v in data.items() + } + label_dict = {'request': request, 'method': method, 'url': url, **data} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + 
metric_type='chromeperf/perf_issue_service/failure', + label_dict=label_dict) + + +def PublishSkiaUploadResult(test_path, error_message, status): + label_dict = { + 'uuid': str(uuid.uuid4()), + 'test_path': test_path, + 'error_message': error_message, + 'status': status, + } + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='chromeperf', + metric_type='chromeperf/skia/upload_status', + label_dict=label_dict) + + +def PublishSwarmingBotPendingTasksMetric(bot_id, pool, count): + label_dict = {'bot_id': bot_id, 'pool': pool} + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='pinpoint', + metric_type='pinpoint/swarming_job/bot_pending_tasks', + label_dict=label_dict, + metric_value=count) + +def PublishPinpointSwarmingPendingMetric(task_id, pinpoint_job_type, + pinpoint_job_id, bot_id, bot_os, + pending_time): + label_dict = { + SWARMING_TASK_ID: task_id, + JOB_TYPE: pinpoint_job_type, + JOB_ID: pinpoint_job_id, + SWARMING_BOT_ID: bot_id, + SWARMING_BOT_OS: bot_os + } + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='pinpoint', + metric_type='pinpoint/swarming_job/pending_time', + label_dict=label_dict, + metric_value=pending_time) + + +def PublishPinpointSwarmingRuntimeMetric(task_id, pinpoint_job_type, + pinpoint_job_id, bot_id, bot_os, + running_time): + label_dict = { + SWARMING_TASK_ID: task_id, + JOB_TYPE: pinpoint_job_type, + JOB_ID: pinpoint_job_id, + SWARMING_BOT_ID: bot_id, + SWARMING_BOT_OS: bot_os + } + _PublishTSCloudMetric( + project_id=app_identity.get_application_id(), + service_name='pinpoint', + metric_type='pinpoint/swarming_job/run_time', + label_dict=label_dict, + metric_value=running_time) + + +def PublishFrozenJobMetric(project_id, job_id, job_type, job_status, + metric_value=1): + label_dict = {JOB_ID: job_id, JOB_TYPE: job_type, JOB_STATUS: job_status} + _PublishTSCloudMetric(project_id, "pinpoint", "pinpoint/job/frozen_job", + label_dict, metric_value) + + +def PublishPinpointJobStatusMetric(project_id, + job_id, + job_type, + job_status, + job_user, + origin, + job_type_by_name, + bot_name, + benchmark, + story, + metric_value=1): + label_dict = { + JOB_ID: job_id, + JOB_TYPE: job_type, + JOB_STATUS: job_status, + USER: job_user, + ORIGIN: origin, + JOB_TYPE_BY_NAME: job_type_by_name, + BOT_NAME: bot_name, + BENCHMARK: benchmark, + STORY: story + } + _PublishTSCloudMetric(project_id, "pinpoint", "pinpoint/job/status_change", + label_dict, metric_value) + + +def PublishPinpointJobRunTimeMetric(project_id, job_id, job_type, job_status, + job_user, origin, job_type_by_name, + bot_name, benchmark, story, metric_value): + label_dict = { + JOB_ID: job_id, + JOB_TYPE: job_type, + JOB_STATUS: job_status, + USER: job_user, + ORIGIN: origin, + JOB_TYPE_BY_NAME: job_type_by_name, + BOT_NAME: bot_name, + BENCHMARK: benchmark, + STORY: story + } + _PublishTSCloudMetric(project_id, "pinpoint", "pinpoint/job/run_time", + label_dict, metric_value) + + +def PublishPinpointJobDetailMetrics(project_id, + job_id, + job_type, + job_status, + job_user, + origin, + job_type_by_name, + bot_name, + benchmark, + story, + change_count, + attempt_count, + difference_count=0): + label_dict = { + JOB_ID: job_id, + JOB_TYPE: job_type, + JOB_STATUS: job_status, + USER: job_user, + ORIGIN: origin, + JOB_TYPE_BY_NAME: job_type_by_name, + BOT_NAME: bot_name, + BENCHMARK: benchmark, + STORY: story + } + _PublishTSCloudMetric(project_id, "pinpoint", + "pinpoint/job/change_count_per_job", 
label_dict, + change_count) + _PublishTSCloudMetric(project_id, "pinpoint", + "pinpoint/job/attempt_count_per_job", label_dict, + attempt_count) + _PublishTSCloudMetric(project_id, "pinpoint", + "pinpoint/job/difference_count_per_job", label_dict, + difference_count) + _PublishTSCloudMetric(project_id, "pinpoint", + "pinpoint/job/has_difference", label_dict, + 0 if difference_count == 0 else 1) + + +def _PublishTSCloudMetric(project_id, + service_name, + metric_type, + label_dict, + metric_value=1): + if app_identity.get_application_id() == 'testbed-test': + # do not proceed if it is running unit tests + return + + client = monitoring_v3.MetricServiceClient() + project_name = f"projects/{project_id}" + + series = monitoring_v3.TimeSeries() + + series.metric.type = METRIC_TYPE_PREFIX + metric_type + + series.resource.type = RESOURCE_TYPE + + # The identifier of the GCP project associated with this resource, + # such as "my-project". + series.resource.labels["project_id"] = project_id + + # The GCP region in which data about the resource is stored + series.resource.labels["location"] = LOCATION + + # A namespace identifier, such as a cluster name: Dev, Staging or Prod + series.resource.labels["namespace"] = NAMESPACE + + # An identifier for a grouping of related tasks, such as the name of + # a microservice or distributed batch job + series.resource.labels["job"] = service_name + + # A unique identifier for the task within the namespace and job, + # set default value for this manditory field + series.resource.labels["task_id"] = DEFAULT_TASK_ID + + # debug infor for crbug/1422306 + for key in label_dict: + try: + series.metric.labels[key] = label_dict[key] + except TypeError as e: + series.metric.labels[key] = str(label_dict[key]) + logging.warning('Invalid value found in label_dict: %s. (%s)', label_dict, + str(e)) + + now = time.time() + seconds = int(now) + nanos = int((now - seconds) * 10**9) + interval = monitoring_v3.TimeInterval( + {"end_time": { + "seconds": seconds, + "nanos": nanos + }}) + try: + point = monitoring_v3.Point({ + "interval": interval, + "value": { + "double_value": metric_value + } + }) + except TypeError as e: + point = monitoring_v3.Point({ + "interval": interval, + "value": { + "double_value": int(metric_value) + } + }) + logging.warning('Invalid value found in metric_value: %s. (%s)', + metric_value, str(e)) + + series.points = [point] + + try: + client.create_time_series(name=project_name, time_series=[series]) + except Exception as e: # pylint: disable=broad-except + # Swallow the error from Cloud Monitoring API, the failure from + # Cloud Monitoring API should not break our code logic. + logging.warning('Publish data to Cloud Monitoring failed. Error: %s', e) + + +class APIMetricLogger: + + def __init__(self, service_name, api_name): + """ This metric logger can be used by the with statement: + https://peps.python.org/pep-0343/ + """ + self._service_name = service_name + self._api_name = api_name + self._start = None + self.seconds = 0 + + def _Now(self): + return time.time() + + def __enter__(self): + self._start = self._Now() + # Currently, Cloud Monitoring allows one write every 5 seconds for any + # unique tuple (metric_name, metric_label_value_1, metric_label_value_2, …). + # + # To avoid being throttled by Cloud Monitoring, add a UUID label_value to + # make the tuple unique. 
+ # https://cloud.google.com/monitoring/quotas + label_dict = {API_NAME: self._api_name, REQUEST_STATUS: "started", + UUID: str(uuid.uuid4())} + _PublishTSCloudMetric(app_identity.get_application_id(), self._service_name, + API_METRIC_TYPE, label_dict) + + def __exit__(self, exception_type, exception_value, execution_traceback): + if exception_type is None: + # with statement BLOCK runs succeed + self.seconds = self._Now() - self._start + logging.info('%s:%s=%f', self._service_name, self._api_name, self.seconds) + label_dict = {API_NAME: self._api_name, REQUEST_STATUS: "completed", + UUID: str(uuid.uuid4())} + _PublishTSCloudMetric(app_identity.get_application_id(), + self._service_name, API_METRIC_TYPE, label_dict, + self.seconds) + return True + + # with statement BLOCK throws exception + label_dict = {API_NAME: self._api_name, REQUEST_STATUS: "failed", + UUID: str(uuid.uuid4())} + _PublishTSCloudMetric(app_identity.get_application_id(), + self._service_name, API_METRIC_TYPE, label_dict) + # throw out the original exception + return False + + +def APIMetric(service_name, api_name): + + def Decorator(wrapped): + + def Wrapper(*a, **kw): + with APIMetricLogger(service_name, api_name): + return wrapped(*a, **kw) + + return Wrapper + + return Decorator diff --git a/dashboard/dashboard/common/clustering_change_detector.py b/dashboard/dashboard/common/clustering_change_detector.py index a9a013888a0..3f900a54187 100644 --- a/dashboard/dashboard/common/clustering_change_detector.py +++ b/dashboard/dashboard/common/clustering_change_detector.py @@ -258,4 +258,4 @@ def ClusterAndFindSplit(values, rand=None): if not candidate_indices: raise InsufficientData('Not enough data to suggest a change point.') - return [c for c in sorted(candidate_indices)] + return list(sorted(candidate_indices)) diff --git a/dashboard/dashboard/common/datastore_hooks.py b/dashboard/dashboard/common/datastore_hooks.py index 78899f1243b..27a4a569089 100644 --- a/dashboard/dashboard/common/datastore_hooks.py +++ b/dashboard/dashboard/common/datastore_hooks.py @@ -10,7 +10,10 @@ from __future__ import division from __future__ import absolute_import -import webapp2 +from flask import g as flask_global +from flask import request as flask_request +import logging +import os from google.appengine.api import apiproxy_stub_map from google.appengine.api import users @@ -57,8 +60,7 @@ def SetPrivilegedRequest(): This should be set once per request, before accessing the data store. """ - request = webapp2.get_request() - request.registry['privileged'] = True + flask_global.privileged = True def SetSinglePrivilegedRequest(): @@ -68,38 +70,45 @@ def SetSinglePrivilegedRequest(): before making a query. It will be automatically unset when the next query is made. """ - request = webapp2.get_request() - request.registry['single_privileged'] = True + flask_global.single_privileged = True def CancelSinglePrivilegedRequest(): - """Disallows the current request to act as a privileged user only.""" - request = webapp2.get_request() - request.registry['single_privileged'] = False + """Disallows the current request to act as a privileged user only. + + """ + flask_global.single_privileged = False def _IsServicingPrivilegedRequest(): - """Checks whether the request is considered privileged.""" + """Checks whether the request is considered privileged. + + """ try: - request = webapp2.get_request() - except AssertionError: - # This happens in unit tests, when code gets called outside of a request. 
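For illustration only (not part of the patch): the cloud_metric helpers added above can be used either through the APIMetric decorator or the APIMetricLogger context manager. The service and API labels below are made up; only the decorator and context-manager shapes come from the new module.

    import time

    from dashboard.common import cloud_metric

    @cloud_metric.APIMetric("chromeperf", "/api/example")  # labels are hypothetical
    def HandleExampleRequest():
      # Publishes a "started" sample on entry, a "completed" sample with the
      # measured wall time on a clean return, and a "failed" sample (re-raising)
      # if this body throws.
      return "ok"

    # The context-manager form times an arbitrary block the same way.
    with cloud_metric.APIMetricLogger("chromeperf", "/api/example"):
      time.sleep(0.1)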
- return False - path = getattr(request, 'path', '') + if 'privileged' in flask_global and flask_global.privileged: + return True + if 'single_privileged' in flask_global and flask_global.single_privileged: + flask_global.pop('single_privileged') + return True + path = flask_request.path + except RuntimeError: + # This happens in the deferred task queue and in unit tests, when code + # gets called without any context of a Flask request. + try: + path = os.environ['PATH_INFO'] + except KeyError: + logging.error( + 'Cannot tell whether a request is privileged without request path.') + return False if path.startswith('/mapreduce'): return True if path.startswith('/_ah/queue/deferred'): return True if path.startswith('/_ah/pipeline/'): return True - if request.registry.get('privileged', False): - return True - if request.registry.get('single_privileged', False): - request.registry['single_privileged'] = False - return True - allowlist = utils.GetIpAllowlist() - if allowlist and hasattr(request, 'remote_addr'): - return request.remote_addr in allowlist + # We used to check utils.GetIpAllowlist() here. However, the list + # has been empty, and the check caused infinite recursion in crbug/1402197, + # so it has been removed. return False @@ -113,13 +122,13 @@ def IsUnalteredQueryPermitted(): Returns: True for users with google.com emails and privileged requests. """ - if utils.IsInternalUser(): + if _IsServicingPrivilegedRequest(): return True - if users.is_current_user_admin(): - # It's possible to be an admin with a non-internal account; For example, - # the default login for dev appserver instances is test@example.com. + if utils.IsInternalUser(): return True - return _IsServicingPrivilegedRequest() + # It's possible to be an admin with a non-internal account; For example, + # the default login for dev appserver instances is test@example.com. + return users.is_current_user_admin() def GetNamespace(): @@ -144,20 +153,15 @@ def _DatastorePreHook(service, call, request, _): assert service == 'datastore_v3' if call != 'RunQuery': return - if request.kind() not in _INTERNAL_ONLY_KINDS: - return if IsUnalteredQueryPermitted(): return # Add a filter for internal_only == False, because the user is external. - try: - external_filter = request.filter_list().add() - except AttributeError: - # This is required to support proto1, which may be used by the unit tests. - # Later, if we don't need to support proto1, then this can be removed.
- external_filter = request.add_filter() - external_filter.set_op(datastore_pb.Query_Filter.EQUAL) - new_property = external_filter.add_property() - new_property.set_name('internal_only') - new_property.mutable_value().set_booleanvalue(False) - new_property.set_multiple(False) + if request.kind not in _INTERNAL_ONLY_KINDS: + return + query_filter = request.filter.add() + query_filter.op = datastore_pb.Query.Filter.EQUAL + filter_property = query_filter.property.add() + filter_property.name = 'internal_only' + filter_property.value.booleanValue = False + filter_property.multiple = False diff --git a/dashboard/dashboard/common/datastore_hooks_test.py b/dashboard/dashboard/common/datastore_hooks_test.py index b42c4ccc3e1..f82f7983352 100644 --- a/dashboard/dashboard/common/datastore_hooks_test.py +++ b/dashboard/dashboard/common/datastore_hooks_test.py @@ -6,6 +6,7 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import unittest from google.appengine.ext import ndb @@ -16,7 +17,7 @@ from dashboard.models import graph_data -class FakeRequest(object): +class FakeRequest: def __init__(self): self.registry = {} @@ -25,7 +26,7 @@ def __init__(self): class DatastoreHooksTest(testing_common.TestCase): def setUp(self): - super(DatastoreHooksTest, self).setUp() + super().setUp() testing_common.SetIsInternalUser('internal@chromium.org', True) testing_common.SetIsInternalUser('foo@chromium.org', False) self._AddDataToDatastore() @@ -33,7 +34,7 @@ def setUp(self): self.PatchDatastoreHooksRequest() def tearDown(self): - super(DatastoreHooksTest, self).tearDown() + super().tearDown() self.UnsetCurrentUser() def _AddDataToDatastore(self): @@ -161,21 +162,25 @@ def testQuery_InternalUser_InternalOnlyFetched(self): self._CheckQueryResults(True) def testQuery_PrivilegedRequest_InternalOnlyFetched(self): - self.UnsetCurrentUser() - datastore_hooks.SetPrivilegedRequest() - self._CheckQueryResults(True) + app = Flask(__name__) + with app.test_request_context('dummy/path', 'GET'): + self.UnsetCurrentUser() + datastore_hooks.SetPrivilegedRequest() + self._CheckQueryResults(True) def testQuery_SinglePrivilegedRequest_InternalOnlyFetched(self): - self.UnsetCurrentUser() - datastore_hooks.SetSinglePrivilegedRequest() - # Not using _CheckQueryResults because this only affects a single query. - # First query has internal results. - bots = graph_data.Bot.query().fetch() - self.assertEqual(2, len(bots)) + app = Flask(__name__) + with app.test_request_context('dummy/path', 'GET'): + self.UnsetCurrentUser() + datastore_hooks.SetSinglePrivilegedRequest() + # Not using _CheckQueryResults because this only affects a single query. + # First query has internal results. + bots = graph_data.Bot.query().fetch() + self.assertEqual(2, len(bots)) - # Second query does not. - bots = graph_data.Bot.query().fetch() - self.assertEqual(1, len(bots)) + # Second query does not. 
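An illustrative sketch (not part of the patch) of the request-scoped privilege flags that the updated tests above exercise, assuming a Flask request context like the one they create:

    from flask import Flask

    from dashboard.common import datastore_hooks

    app = Flask(__name__)

    with app.test_request_context('/dummy/path'):
      # Marks every datastore query in this request as privileged; the flag now
      # lives on flask.g instead of the old webapp2 request registry.
      datastore_hooks.SetPrivilegedRequest()

    with app.test_request_context('/dummy/path'):
      # Marks only the next query as privileged; _IsServicingPrivilegedRequest()
      # pops the flag from flask.g the first time it is consulted.
      datastore_hooks.SetSinglePrivilegedRequest()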
+ bots = graph_data.Bot.query().fetch() + self.assertEqual(1, len(bots)) def _CheckGet(self, include_internal): m = ndb.Key('Master', 'ChromiumPerf').get() @@ -219,9 +224,11 @@ def testGet_AdminUser(self): self._CheckGet(include_internal=True) def testGet_PrivilegedRequest(self): - self.UnsetCurrentUser() - datastore_hooks.SetPrivilegedRequest() - self._CheckGet(include_internal=True) + app = Flask(__name__) + with app.test_request_context('dummy/path', 'GET'): + self.UnsetCurrentUser() + datastore_hooks.SetPrivilegedRequest() + self._CheckGet(include_internal=True) if __name__ == '__main__': diff --git a/dashboard/dashboard/common/defaults.py b/dashboard/dashboard/common/defaults.py index 83ec7b5ad5f..59dc5a59ccf 100644 --- a/dashboard/dashboard/common/defaults.py +++ b/dashboard/dashboard/common/defaults.py @@ -14,7 +14,7 @@ # NOTE: Whenever you update any of the default values below, please also make # the same updates at the following locations: -# * AnomalyConfig defined in ../proto/sheriff.proto +# * AnomalyConfig defined in ../protobuf/sheriff.proto # * anomaly_configs_defaults defined in # https://chrome-internal.googlesource.com/infra/infra_internal/+/HEAD/infra/config/subprojects/chromeperf-sheriffs.star @@ -35,7 +35,7 @@ MIN_ABSOLUTE_CHANGE = 0 # Minimum relative difference between medians before and after. -MIN_RELATIVE_CHANGE = 0.01 +MIN_RELATIVE_CHANGE = 0.1 # "Steppiness" is a number between 0 and 1 that indicates how similar the # shape is to a perfect step function, where 1 represents a step function. diff --git a/dashboard/dashboard/common/descriptor.py b/dashboard/dashboard/common/descriptor.py index 1bb472ea9f6..2a2ceeacf8f 100644 --- a/dashboard/dashboard/common/descriptor.py +++ b/dashboard/dashboard/common/descriptor.py @@ -73,7 +73,7 @@ COMPLEX_CASES_TEST_SUITES_KEY = 'complex_cases_test_suites' -class Descriptor(object): +class Descriptor: """Describe a timeseries by its characteristics. Supports partial test paths (e.g. test suite paths) by allowing some @@ -103,6 +103,10 @@ def __repr__(self): self.test_suite, self.measurement, self.bot, self.test_case, self.statistic, self.build_type) + def __hash__(self): + return hash(self.test_suite, self.measurement, self.bot, self.test_case, + self.statistic, self.build_type) + def __eq__(self, other): return repr(self) == repr(other) diff --git a/dashboard/dashboard/common/descriptor_test.py b/dashboard/dashboard/common/descriptor_test.py index b86c20fa3b0..6af64eaee84 100644 --- a/dashboard/dashboard/common/descriptor_test.py +++ b/dashboard/dashboard/common/descriptor_test.py @@ -18,7 +18,7 @@ class DescriptorTest(testing_common.TestCase): def setUp(self): - super(DescriptorTest, self).setUp() + super().setUp() stored_object.Set(descriptor.PARTIAL_TEST_SUITES_KEY, [ 'TEST_PARTIAL_TEST_SUITE', ]) diff --git a/dashboard/dashboard/common/feature_flags.py b/dashboard/dashboard/common/feature_flags.py new file mode 100644 index 00000000000..a176151d3b6 --- /dev/null +++ b/dashboard/dashboard/common/feature_flags.py @@ -0,0 +1,7 @@ +# Copyright 2023 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
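One note on the __hash__ added to Descriptor above (and to SampleSerializableClass later in this patch): the built-in hash() accepts exactly one argument, so passing the fields separately raises TypeError as soon as the object is hashed. A tuple-based sketch of the same intent, for illustration only:

    def __hash__(self):
      # hash() takes a single argument; combine the fields into one tuple.
      return hash((self.test_suite, self.measurement, self.bot, self.test_case,
                   self.statistic, self.build_type))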
+"""An *extremely* low-budget "feature flag" implementation.""" + +# https://bugs.chromium.org/p/chromium/issues/detail?id=1419903 +SANDWICH_VERIFICATION = True diff --git a/dashboard/dashboard/common/file_bug.py b/dashboard/dashboard/common/file_bug.py index 374d70ae406..b2b890b92ab 100644 --- a/dashboard/dashboard/common/file_bug.py +++ b/dashboard/dashboard/common/file_bug.py @@ -24,7 +24,7 @@ from dashboard.models import histogram from dashboard.services import crrev_service from dashboard.services import gitiles_service -from dashboard.services import issue_tracker_service +from dashboard.services import perf_issue_service_client from tracing.value.diagnostics import reserved_infos # A list of bug labels to suggest for all performance regression bugs. @@ -52,7 +52,8 @@ def _AdditionalDetails(bug_id, project_id, alerts): """Returns a message with additional information to add to a bug.""" base_url = '%s/group_report' % _GetServerURL() bug_page_url = '%s?bug_id=%s&project_id=%s' % (base_url, bug_id, project_id) - sid = short_uri.GetOrCreatePageState(json.dumps(_UrlsafeKeys(alerts))) + alert_keys = utils.ConvertBytesBeforeJsonDumps(_UrlsafeKeys(alerts)) + sid = short_uri.GetOrCreatePageState(json.dumps(alert_keys)) alerts_url = '%s?sid=%s' % (base_url, sid) comment = 'All graphs for this bug:\n %s\n\n' % bug_page_url comment += ( @@ -211,9 +212,9 @@ def _GetMilestoneForRevision(revision): default_milestone = milestone except ValueError: # Sometimes 'N/A' is given. We ignore these entries. - logging.warn('Could not cast one of: %s, %s, %s as an int', revision, - version['branch_base_position'], - version['current_version'].split('.')[0]) + logging.warning('Could not cast one of: %s, %s, %s as an int', revision, + version['branch_base_position'], + version['current_version'].split('.')[0]) if milestones: return min(milestones) return default_milestone @@ -273,11 +274,7 @@ def GetCommitInfoForAlert(alert, crrev=None, gitiles=None): return gitiles.CommitInfo(repository_url, rev) -def AssignBugToCLAuthor(bug_id, - commit_info, - service, - labels=None, - project='chromium'): +def AssignBugToCLAuthor(bug_id, commit_info, labels=None, project='chromium'): """Assigns the bug to the author of the given revision.""" author = commit_info['author']['email'] message = commit_info['message'] @@ -288,19 +285,18 @@ def AssignBugToCLAuthor(bug_id, author, message) author = alternative_assignee or author - service.AddBugComment( + perf_issue_service_client.PostIssueComment( bug_id, - 'Assigning to %s because this is the only CL in range:\n%s' % + project, + comment='Assigning to %s because this is the only CL in range:\n%s' % (author, message), status='Assigned', labels=labels, owner=author, - project=project, ) -def FileBug(http, - owner, +def FileBug(owner, cc, summary, description, @@ -319,10 +315,9 @@ def FileBug(http, if milestone_label: labels.append(milestone_label) - user_issue_tracker_service = issue_tracker_service.IssueTrackerService(http) - new_bug_response = user_issue_tracker_service.NewBug( - summary, - description, + new_bug_response = perf_issue_service_client.PostIssue( + title=summary, + description=description, project=project_id or 'chromium', labels=labels, components=components, @@ -332,7 +327,7 @@ def FileBug(http, if 'error' in new_bug_response: return {'error': new_bug_response['error']} - bug_id = new_bug_response['bug_id'] + bug_id = new_bug_response['issue_id'] bug_data.Bug.New(bug_id=bug_id, project=project_id or 'chromium').put() for a in alerts: @@ -344,27 +339,24 @@ def 
FileBug(http, # Add the bug comment with the service account, so that there are no # permissions issues. - dashboard_issue_tracker_service = issue_tracker_service.IssueTrackerService( - utils.ServiceAccountHttp()) - dashboard_issue_tracker_service.AddBugComment(bug_id, comment_body, - project_id) + perf_issue_service_client.PostIssueComment( + bug_id, project_id, comment=comment_body) template_params = {'bug_id': bug_id, 'project_id': project_id} if all(k.kind() == 'Anomaly' for k in alert_keys): - logging.info('Kicking bisect for bug ' + str(bug_id)) + logging.info('Kicking bisect for bug %s', bug_id) culprit_rev = _GetSingleCLForAnomalies(alerts) if culprit_rev is not None: commit_info = GetCommitInfoForAlert(alerts[0]) if commit_info: needs_bisect = False - AssignBugToCLAuthor(bug_id, commit_info, - dashboard_issue_tracker_service) + AssignBugToCLAuthor(bug_id, commit_info) if needs_bisect: bisect_result = auto_bisect.StartNewBisectForBug(bug_id, project_id) if 'error' in bisect_result: - logging.info('Failed to kick bisect for ' + str(bug_id)) + logging.info('Failed to kick bisect for %s', bug_id) template_params['bisect_error'] = bisect_result['error'] else: - logging.info('Successfully kicked bisect for ' + str(bug_id)) + logging.info('Successfully kicked bisect for %s', bug_id) template_params.update(bisect_result) else: kinds = set() diff --git a/dashboard/dashboard/common/histogram_helpers.py b/dashboard/dashboard/common/histogram_helpers.py index 36c973162f8..067bdc3eccf 100644 --- a/dashboard/dashboard/common/histogram_helpers.py +++ b/dashboard/dashboard/common/histogram_helpers.py @@ -40,8 +40,7 @@ 'smoothness.tough_pinch_zoom_cases', 'smoothness.tough_scrolling_cases', 'smoothness.tough_texture_upload_cases', 'smoothness.tough_webgl_ad_cases', 'smoothness.tough_webgl_cases', 'speedometer', 'speedometer-future', - 'speedometer2', 'speedometer2-future', 'start_with_url.cold.startup_pages', - 'start_with_url.warm.startup_pages', 'thread_times.key_hit_test_cases', + 'speedometer2', 'speedometer2-future', 'thread_times.key_hit_test_cases', 'thread_times.key_idle_power_cases', 'thread_times.key_mobile_sites_smooth', 'thread_times.key_noop_cases', 'thread_times.key_silk_cases', 'thread_times.simple_mobile_sites', 'thread_times.tough_compositor_cases', @@ -174,7 +173,7 @@ def ShouldFilterStatistic(test_name, benchmark_name, stat_name): if benchmark_name.startswith('memory.long_running'): value_name = '%s_%s' % (test_name, stat_name) return not _ShouldAddMemoryLongRunningValue(value_name) - if benchmark_name == 'media.desktop' or benchmark_name == 'media.mobile': + if benchmark_name in ('media.desktop', 'media.mobile'): value_name = '%s_%s' % (test_name, stat_name) return not _ShouldAddMediaValue(value_name) if benchmark_name.startswith('system_health'): diff --git a/dashboard/dashboard/common/histogram_helpers_test.py b/dashboard/dashboard/common/histogram_helpers_test.py index f192a1d2669..7045c3e5c1f 100644 --- a/dashboard/dashboard/common/histogram_helpers_test.py +++ b/dashboard/dashboard/common/histogram_helpers_test.py @@ -16,9 +16,6 @@ class HistogramHelpersTest(testing_common.TestCase): - def setUp(self): - super(HistogramHelpersTest, self).setUp() - def testGetGroupingLabelFromHistogram_NoTags_ReturnsEmpty(self): hist = histogram_module.Histogram('hist', 'count') self.assertEqual('', histogram_helpers.GetGroupingLabelFromHistogram(hist)) diff --git a/dashboard/dashboard/common/layered_cache.py b/dashboard/dashboard/common/layered_cache.py index a82adfa174c..dd0220576e7 
100644 --- a/dashboard/dashboard/common/layered_cache.py +++ b/dashboard/dashboard/common/layered_cache.py @@ -32,11 +32,7 @@ from __future__ import division from __future__ import absolute_import -try: - import cPickle -except ImportError: - # pickle in python 3 uses the c version as cPickle in python 2. - import pickle as cPickle +import six.moves.cPickle as cPickle import datetime import logging @@ -68,7 +64,8 @@ def GetExpiredKeys(cls): current_time = datetime.datetime.now() query = cls.query(cls.expire_time < current_time) query = query.filter(cls.expire_time != None) - return query.fetch(keys_only=True) + query = query.order(-cls.expire_time) + return query.fetch(limit=1000, keys_only=True) def Get(key): @@ -80,8 +77,7 @@ def Get(key): namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY) if entity: return cPickle.loads(entity.value) - else: - return stored_object.Get(key) + return stored_object.Get(key) def GetExternal(key): @@ -94,8 +90,7 @@ def GetExternal(key): namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY) if entity: return cPickle.loads(entity.value) - else: - return stored_object.Get(key) + return stored_object.Get(key) def Set(key, value, days_to_keep=None, namespace=None): diff --git a/dashboard/dashboard/common/layered_cache_test.py b/dashboard/dashboard/common/layered_cache_test.py index 18a491366f7..945d0b9d686 100644 --- a/dashboard/dashboard/common/layered_cache_test.py +++ b/dashboard/dashboard/common/layered_cache_test.py @@ -6,11 +6,7 @@ from __future__ import division from __future__ import absolute_import -try: - import cPickle -except ImportError: - # pickle in python 3 uses the c version as cPickle in python 2. - import pickle as cPickle +import six.moves.cPickle as cPickle import datetime import mock import unittest @@ -26,7 +22,7 @@ class LayeredCacheTest(testing_common.TestCase): def setUp(self): - super(LayeredCacheTest, self).setUp() + super().setUp() self.UnsetCurrentUser() testing_common.SetIsInternalUser('internal@chromium.org', True) testing_common.SetIsInternalUser('foo@chromium.org', False) diff --git a/dashboard/dashboard/common/math_utils_test.py b/dashboard/dashboard/common/math_utils_test.py index 48ba16e0b80..ca2c930c040 100644 --- a/dashboard/dashboard/common/math_utils_test.py +++ b/dashboard/dashboard/common/math_utils_test.py @@ -82,19 +82,19 @@ def testRelativeChange_FromZero_ReturnsInf(self): self.assertEqual(float('inf'), math_utils.RelativeChange(0, 1)) def testIqr(self): - self.assertEqual(4, math_utils.Iqr(range(8, 0, -1))) + self.assertEqual(4, math_utils.Iqr(list(range(8, 0, -1)))) def testPercentile_RoundIndex(self): - self.assertEqual(2, math_utils.Percentile(range(5), 0.5)) + self.assertEqual(2, math_utils.Percentile(list(range(5)), 0.5)) def testPercentile_Interpolation(self): - self.assertEqual(5.1, math_utils.Percentile(range(8), 0.7)) + self.assertEqual(5.1, math_utils.Percentile(list(range(8)), 0.7)) def testPercentile_Min(self): - self.assertEqual(0, math_utils.Percentile(range(8), 0)) + self.assertEqual(0, math_utils.Percentile(list(range(8)), 0)) def testPercentile_Max(self): - self.assertEqual(7, math_utils.Percentile(range(8), 1)) + self.assertEqual(7, math_utils.Percentile(list(range(8)), 1)) if __name__ == '__main__': diff --git a/dashboard/dashboard/common/namespaced_stored_object_test.py b/dashboard/dashboard/common/namespaced_stored_object_test.py index 501f05268c9..3e8441ceb33 100644 --- a/dashboard/dashboard/common/namespaced_stored_object_test.py +++ 
b/dashboard/dashboard/common/namespaced_stored_object_test.py @@ -16,12 +16,12 @@ class NamespacedStoredObjectTest(testing_common.TestCase): def setUp(self): - super(NamespacedStoredObjectTest, self).setUp() + super().setUp() testing_common.SetIsInternalUser('internal@chromium.org', True) testing_common.SetIsInternalUser('foo@chromium.org', False) def tearDown(self): - super(NamespacedStoredObjectTest, self).tearDown() + super().tearDown() self.UnsetCurrentUser() def testSet_InternalUser_InternalVersionSet(self): diff --git a/dashboard/dashboard/common/oauth2_utils.py b/dashboard/dashboard/common/oauth2_utils.py new file mode 100644 index 00000000000..74a2eb045b1 --- /dev/null +++ b/dashboard/dashboard/common/oauth2_utils.py @@ -0,0 +1,106 @@ +# Copyright 2022 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +"""oauth2 function wrappers which are used by chromeperf.""" +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import base64 +import binascii +import hashlib +import hmac +import logging +import six +import time + +import google.auth + +XSRF_DELIMITER = b':' +XSRF_DEFAULT_TIMEOUT_SECS = 60 * 60 + + +def GetAppDefaultCredentials(scope=None): + try: + credentials, _ = google.auth.default() + if scope and credentials.requires_scopes: + credentials = credentials.with_scopes([scope]) + return credentials + except google.auth.exceptions.DefaultCredentialsError as e: + logging.error('Error when getting the application default credentials: %s', + str(e)) + return None + + +# The xsrf related logic are from the oauth2client.xsrfutil. The oauth2client +# itself has been long deprecated and the xsrfutil is a helper which does not +# rely on the auth-related workflows. +def GenerateXsrfToken(key, user_id, action_id='', when=None): + """Generates a URL-safe token for the given user, action, time tuple. + + Args: + key: secret key to use. + user_id: the user ID of the authenticated user. + action_id: a string identifier of the action they requested + authorization for. + when: the time in seconds since the epoch at which the user was + authorized for this action. If not set the current time is used. + + Returns: + A string XSRF protection token. + """ + digester = hmac.new( + six.ensure_binary(key, encoding='utf-8'), digestmod=hashlib.md5) + digester.update(six.ensure_binary(str(user_id), encoding='utf-8')) + digester.update(XSRF_DELIMITER) + digester.update(six.ensure_binary(action_id, encoding='utf-8')) + digester.update(XSRF_DELIMITER) + when = six.ensure_binary(str(when or int(time.time())), encoding='utf-8') + digester.update(when) + digest = digester.digest() + + token = base64.urlsafe_b64encode(digest + XSRF_DELIMITER + when) + return token + + +def ValidateXsrfToken(key, token, user_id, action_id="", current_time=None): + """Validates that the given token authorizes the user for the action. + + Tokens are invalid if the time of issue is too old or if the token + does not match what generateToken outputs (i.e. the token was forged). + + Args: + key: secret key to use. + token: a string of the token generated by generateToken. + user_id: the user ID of the authenticated user. + action_id: a string identifier of the action they requested + authorization for. + + Returns: + A boolean - True if the user is authorized for the action, False + otherwise. 
+ """ + if not token: + return False + try: + decoded = base64.urlsafe_b64decode(token) + token_time = int(decoded.split(XSRF_DELIMITER)[-1]) + except (TypeError, ValueError, binascii.Error): + return False + if current_time is None: + current_time = time.time() + # If the token is too old it's not valid. + if current_time - token_time > XSRF_DEFAULT_TIMEOUT_SECS: + return False + + # The given token should match the generated one with the same time. + expected_token = GenerateXsrfToken( + key, user_id, action_id=action_id, when=token_time) + if len(token) != len(expected_token): + return False + + # Perform constant time comparison to avoid timing attacks + different = 0 + for x, y in zip(bytearray(token), bytearray(expected_token)): + different |= x ^ y + return not different diff --git a/dashboard/dashboard/common/report_query.py b/dashboard/dashboard/common/report_query.py index 70d20ada6b9..e29551768b3 100644 --- a/dashboard/dashboard/common/report_query.py +++ b/dashboard/dashboard/common/report_query.py @@ -26,7 +26,7 @@ def TableRowDescriptors(table_row): yield descriptor.Descriptor(test_suite, table_row['measurement'], bot) -class ReportQuery(object): +class ReportQuery: """Take a template and revisions. Return a report. Templates look like this: { @@ -143,7 +143,7 @@ def _IgnoreDataWithWrongUnits(self, table_row): if datum['units'] == table_row['units']: new_data.append(datum) else: - logging.warn('Expected units=%r; %r', table_row['units'], datum) + logging.warning('Expected units=%r; %r', table_row['units'], datum) table_row['data'][rev] = new_data def _MergeData(self, table_row): @@ -276,7 +276,7 @@ def _GetDataRow(self, test_path, rev): if not entities: raise ndb.Return(None) if len(entities) > 1: - logging.warn('Found too many Row entities: %r %r', rev, test_path) + logging.warning('Found too many Row entities: %r %r', rev, test_path) raise ndb.Return(None) raise ndb.Return(entities[0]) @@ -285,7 +285,7 @@ def _GetDataRowForKey(self, test_key, rev): query = graph_data.Row.query(graph_data.Row.parent_test == test_key) if rev != 'latest': query = query.filter(graph_data.Row.revision <= rev) - query = query.order(-graph_data.Row.revision) + query = query.order(-graph_data.Row.revision) # pylint: disable=invalid-unary-operand-type data_row = yield query.get_async() raise ndb.Return(data_row) diff --git a/dashboard/dashboard/common/report_query_test.py b/dashboard/dashboard/common/report_query_test.py index f7bc7bf13ee..697e5c4fc8a 100644 --- a/dashboard/dashboard/common/report_query_test.py +++ b/dashboard/dashboard/common/report_query_test.py @@ -22,7 +22,7 @@ class ReportQueryTest(testing_common.TestCase): def setUp(self): - super(ReportQueryTest, self).setUp() + super().setUp() stored_object.Set(descriptor.PARTIAL_TEST_SUITES_KEY, []) stored_object.Set(descriptor.COMPOSITE_TEST_SUITES_KEY, []) stored_object.Set(descriptor.GROUPABLE_TEST_SUITE_PREFIXES_KEY, []) diff --git a/dashboard/dashboard/common/request_handler.py b/dashboard/dashboard/common/request_handler.py index 7dd8d3183b8..90a8039e256 100644 --- a/dashboard/dashboard/common/request_handler.py +++ b/dashboard/dashboard/common/request_handler.py @@ -8,15 +8,17 @@ import logging import os +import six import jinja2 -import webapp2 from google.appengine.api import users from dashboard.common import utils from dashboard.common import xsrf +from flask import make_response, request + _DASHBOARD_PYTHON_DIR = os.path.dirname(os.path.dirname(__file__)) JINJA2_ENVIRONMENT = jinja2.Environment( @@ -27,94 +29,90 @@ 
extensions=['jinja2.ext.autoescape']) -class RequestHandler(webapp2.RequestHandler): - """Base class for requests. Does common template and error handling tasks.""" - - def RenderHtml(self, template_file, template_values, status=200): - """Renders HTML given template and values. - - Args: - template_file: string. File name under templates directory. - template_values: dict. Mapping of template variables to corresponding. - values. - status: int. HTTP status code. - """ - self.response.set_status(status) - template = JINJA2_ENVIRONMENT.get_template(template_file) - self.GetDynamicVariables(template_values) - self.response.out.write(template.render(template_values)) - - def RenderStaticHtml(self, filename): - filename = os.path.join(_DASHBOARD_PYTHON_DIR, 'static', filename) - contents = open(filename, 'r') - self.response.out.write(contents.read()) - contents.close() - - def GetDynamicVariables(self, template_values, request_path=None): - """Gets the values that vary for every page. - - Args: - template_values: dict of name/value pairs. - request_path: path for login urls, None if using the current path. - """ - user_info = '' - xsrf_token = '' - user = users.get_current_user() - display_username = 'Sign in' - title = 'Sign in to an account' - is_admin = False - if user: - display_username = user.email() - title = 'Switch user' - xsrf_token = xsrf.GenerateToken(user) - is_admin = users.is_current_user_admin() - try: - login_url = users.create_login_url(request_path or self.request.path_qs) - except users.RedirectTooLongError: - # On the bug filing pages, the full login URL can be too long. Drop - # the correct redirect URL, since the user should already be logged in at - # this point anyway. - login_url = users.create_login_url('/') - user_info = '%s' % (login_url, title, - display_username) - # Force out of passive login, as it creates multilogin issues. - login_url = login_url.replace('passive=true', 'passive=false') - template_values['login_url'] = login_url - template_values['display_username'] = display_username - template_values['user_info'] = user_info - template_values['is_admin'] = is_admin - template_values['is_internal_user'] = utils.IsInternalUser() - template_values['xsrf_token'] = xsrf_token - template_values['xsrf_input'] = ( - '' % xsrf_token) - template_values['login_url'] = login_url - return template_values - - def ReportError(self, error_message, status=500): - """Reports the given error to the client and logs the error. - - Args: - error_message: The message to log and send to the client. - status: The HTTP response code to use. - """ - logging.error('Reporting error: %r', error_message) - self.response.set_status(status) - self.response.out.write('%s\nrequest_id:%s\n' % - (error_message, utils.GetRequestId())) - - def ReportWarning(self, warning_message, status=200): - """Reports a warning to the client and logs the warning. - - Args: - warning_message: The warning message to log (as an error). - status: The http response code to use. - """ - logging.warning('Reporting warning: %r', warning_message) - self.response.set_status(status) - self.response.out.write('%s\nrequest_id:%s\n' % - (warning_message, utils.GetRequestId())) +def RequestHandlerRenderHtml(template_file, template_values, status=200): + """Renders HTML given template and values. + + Args: + template_file: string. File name under templates directory. + template_values: dict. Mapping of template variables to corresponding. + values. + status: int. HTTP status code. 
+ """ + template = JINJA2_ENVIRONMENT.get_template(template_file) + RequestHandlerGetDynamicVariables(template_values) + return make_response(template.render(template_values), status) + + +def RequestHandlerRenderStaticHtml(filename): + filename = os.path.join(_DASHBOARD_PYTHON_DIR, 'static', filename) + with open(filename, 'r', encoding='utf-8') as contents: + return make_response(contents.read()) + + +def RequestHandlerGetDynamicVariables(template_values, request_path=None): + """Gets the values that vary for every page. + + Args: + template_values: dict of name/value pairs. + request_path: path for login urls, None if using the current path. + """ + user_info = '' + xsrf_token = '' + user = users.get_current_user() + display_username = 'Sign in' + title = 'Sign in to an account' + is_admin = False + if user: + display_username = user.email() + title = 'Switch user' + xsrf_token = six.ensure_str(xsrf.GenerateToken(user)) + is_admin = users.is_current_user_admin() + try: + login_url = users.create_login_url(request_path or request.full_path) + except users.RedirectTooLongError: + # On the bug filing pages, the full login URL can be too long. Drop + # the correct redirect URL, since the user should already be logged in at + # this point anyway. + login_url = users.create_login_url('/') + user_info = '%s' % (login_url, title, + display_username) + # Force out of passive login, as it creates multilogin issues. + login_url = login_url.replace('passive=true', 'passive=false') + template_values['login_url'] = login_url + template_values['display_username'] = display_username + template_values['user_info'] = user_info + template_values['is_admin'] = is_admin + template_values['is_internal_user'] = utils.IsInternalUser() + template_values['xsrf_token'] = xsrf_token + template_values['xsrf_input'] = ( + '' % xsrf_token) + template_values['login_url'] = login_url + return template_values + + +def RequestHandlerReportError(error_message, status=500): + """Reports the given error to the client and logs the error. + + Args: + error_message: The message to log and send to the client. + status: The HTTP response code to use. + """ + logging.error('Reporting error: %r', error_message) + return make_response( + '%s\nrequest_id:%s\n' % (error_message, utils.GetRequestId()), status) + + +def RequestHandlerReportWarning(warning_message, status=200): + """Reports a warning to the client and logs the warning. + + Args: + warning_message: The warning message to log (as an error). + status: The http response code to use. + """ + logging.warning('Reporting warning: %r', warning_message) + return make_response( + '%s\nrequest_id:%s\n' % (warning_message, utils.GetRequestId()), status) class InvalidInputError(Exception): """An error class for invalid user input query parameter values.""" - pass diff --git a/dashboard/dashboard/common/sandwich_allowlist.py b/dashboard/dashboard/common/sandwich_allowlist.py new file mode 100644 index 00000000000..f905e8329d1 --- /dev/null +++ b/dashboard/dashboard/common/sandwich_allowlist.py @@ -0,0 +1,40 @@ +# Copyright 2023 The Chromium Authors +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+from __future__ import absolute_import + +from dashboard.common import feature_flags + +BLOCKED_SUBSCRIPTIONS = [] + +BLOCKED_BENCHMARKS = [] + +BLOCKED_DEVICES = [ + 'lacros-eve-perf', + 'mac-m1_mini_2020-perf-pgo', + 'mac-m1-pro-perf', + 'mac-14-m1-pro-perf', + 'win-10_amd-perf', + 'Win 7 Perf', + 'Win 7 Nvidia GPU Perf', +] + + +def CheckAllowlist(subscription, benchmark, cfg): + '''Check that the subscription, benchmark, and device are + CABE compatible. + + Args: + subscription: regression subscription + benchmark: regression benchmark + cfg: regression configuration + + Returns: + True if allowed, False if not. + ''' + if (feature_flags.SANDWICH_VERIFICATION + and subscription not in BLOCKED_SUBSCRIPTIONS + and benchmark not in BLOCKED_BENCHMARKS and cfg not in BLOCKED_DEVICES): + return True + + return False diff --git a/dashboard/dashboard/common/stored_object.py b/dashboard/dashboard/common/stored_object.py index 5087bfe2e87..d44d0db228b 100644 --- a/dashboard/dashboard/common/stored_object.py +++ b/dashboard/dashboard/common/stored_object.py @@ -20,11 +20,7 @@ from __future__ import division from __future__ import absolute_import -try: - import cPickle as pickle -except ImportError: - # pickle in python 3 uses the c version as cPickle in python 2. - import pickle +import pickle from google.appengine.ext import ndb @@ -122,7 +118,7 @@ def GetPartsAsync(self): for i in range(self.size) ] part_entities = yield ndb.get_multi_async(part_keys) - serialized = ''.join(p.value for p in part_entities if p is not None) + serialized = b''.join(p.value for p in part_entities if p is not None) self.SetData(pickle.loads(serialized)) @classmethod diff --git a/dashboard/dashboard/common/stored_object_test.py b/dashboard/dashboard/common/stored_object_test.py index a5be39c1f54..d9fd961081e 100644 --- a/dashboard/dashboard/common/stored_object_test.py +++ b/dashboard/dashboard/common/stored_object_test.py @@ -12,7 +12,7 @@ from dashboard.common import testing_common -class SampleSerializableClass(object): +class SampleSerializableClass: def __init__(self, data): self.data = data @@ -26,6 +26,9 @@ def __init__(self, data): def __eq__(self, other): return self.__dict__ == other.__dict__ + def __hash__(self): + return hash(self.data, self.user_name, self.user_id, self.family) + class StoredObjectTest(testing_common.TestCase): diff --git a/dashboard/dashboard/common/testing_common.py b/dashboard/dashboard/common/testing_common.py index 1c1aab992ac..5a66638b83b 100644 --- a/dashboard/dashboard/common/testing_common.py +++ b/dashboard/dashboard/common/testing_common.py @@ -17,8 +17,8 @@ import re import sys import unittest -import urllib -import webapp2 +import six +import six.moves.urllib.parse import webtest from google.appengine.api import oauth @@ -31,6 +31,7 @@ from dashboard.common import stored_object from dashboard.common import utils from dashboard.models import graph_data +from dashboard.services import request as request_service _QUEUE_YAML_DIR = os.path.join(os.path.dirname(__file__), '..', '..') @@ -42,7 +43,12 @@ email='external@example.com', _auth_domain='example.com') -class FakeRequestObject(object): +def CheckSandwichAllowlist(subscription, benchmark, cfg): + return not ('blocked' in subscription or 'blocked' in benchmark + or 'blocked' in cfg) + + +class FakeRequestObject: """Fake Request object which can be used by datastore_hooks mocks.""" def __init__(self, remote_addr=None): @@ -50,7 +56,7 @@ def __init__(self, remote_addr=None): self.remote_addr = remote_addr -class 
FakeResponseObject(object): +class FakeResponseObject: """Fake Response Object which can be returned by urlfetch mocks.""" def __init__(self, status_code, content): @@ -88,11 +94,11 @@ def setUp(self): self.logger.addHandler(self.stream_handler) self.addCleanup(self.logger.removeHandler, self.stream_handler) - def SetUpApp(self, handlers): - self.testapp = webtest.TestApp(webapp2.WSGIApplication(handlers)) + def SetUpFlaskApp(self, flask_app): + self.testapp = webtest.TestApp(flask_app) def PatchEnviron(self, path): - environ_patch = {'REQUEST_URI': path} + environ_patch = {'PATH_INFO': path} try: if oauth.get_current_user(utils.OAUTH_SCOPES): # SetCurrentUserOAuth mocks oauth.get_current_user() directly. That @@ -107,8 +113,15 @@ def PatchEnviron(self, path): environ_patch['HTTP_AUTHORIZATION'] = '' except oauth.Error: pass + if self.testapp: + # In Python 3, the 'HTTP_AUTHORIZATION' is found removed in the handler. + self.testapp.extra_environ.update(environ_patch) return mock.patch.dict(os.environ, environ_patch) + def Get(self, path, *args, **kwargs): + with self.PatchEnviron(path): + return self.testapp.get(path, *args, **kwargs) + def Post(self, path, *args, **kwargs): with self.PatchEnviron(path): return self.testapp.post(path, *args, **kwargs) @@ -120,9 +133,12 @@ def ExecuteTaskQueueTasks(self, handler_name, task_queue_name, recurse=True): task_queue.FlushQueue(task_queue_name) responses = [] for task in tasks: - responses.append( - self.Post(handler_name, - urllib.unquote_plus(base64.b64decode(task['body'])))) + # In python 3.8, unquote_plus() and unquote() accept string only. From + # python 3.9, unquote() accept bytes as well. For now, vpython is on + # 3.8 and thus we have to use six.ensure_str. + data = six.moves.urllib.parse.unquote_plus( + six.ensure_str(base64.b64decode(task['body']))) + responses.append(self.Post(handler_name, data)) if recurse: responses.extend( self.ExecuteTaskQueueTasks(handler_name, task_queue_name)) @@ -165,6 +181,10 @@ def UnsetCurrentUser(self): self.testbed.setup_env( user_is_admin='0', user_email='', user_id='', overwrite=True) + def SetUserGroupMembership(self, user_email, group_name, is_member): + """Sets the group membership of the user""" + utils.SetCachedIsGroupMember(user_email, group_name, is_member) + def GetEmbeddedVariable(self, response, var_name): """Gets a variable embedded in a script element in a response. @@ -331,7 +351,7 @@ def SetIpAllowlist(ip_addresses): # TODO(fancl): Make it a "real" fake issue tracker. -class FakeIssueTrackerService(object): +class FakeIssueTrackerService: """A fake version of IssueTrackerService that saves call values.""" def __init__(self): @@ -373,15 +393,17 @@ def __init__(self): 'Blink>ServiceWorker', 'Foo>Bar', ], + 'mergedInto': {}, 'published': '2017-06-28T01:26:53', 'updated': '2018-03-01T16:16:22', } # TODO(dberris): Migrate users to not rely on the seeded issue. self.issues = { - ('chromium', self._bug_id_counter): { - k: v for k, v in itertools.chain(self._base_issue.items(), [( - 'id', self._bug_id_counter), ('projectId', 'chromium')]) - } + ('chromium', self._bug_id_counter): + dict( + itertools.chain( + list(self._base_issue.items()), + [('id', self._bug_id_counter), ('projectId', 'chromium')])) } self.issue_comments = {('chromium', self._bug_id_counter): []} @@ -403,15 +425,15 @@ def NewBug(self, *args, **kwargs): }) # TODO(dberris): In the future, actually generate the issue. 
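A sketch (not part of the patch) of how a handler test might use the new Flask-based harness above; the app here registers no routes, so the request simply returns 404.

    from flask import Flask

    from dashboard.common import testing_common

    class ExampleHandlerTest(testing_common.TestCase):

      def setUp(self):
        super().setUp()
        # A real test would register its handler routes on this app first.
        self.SetUpFlaskApp(Flask(__name__))

      def testGet(self):
        # Get()/Post() wrap webtest calls in PatchEnviron(), which now patches
        # PATH_INFO (rather than REQUEST_URI) and the OAuth environment.
        self.Get('/example', status=404)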
self.issues.update({ - (kwargs.get('project', 'chromium'), self._bug_id_counter): { - k: v - for k, v in itertools.chain(self._base_issue.items(), kwargs.items( - ), [('id', self._bug_id_counter - ), ('projectId', kwargs.get('project', 'chromium'))]) - } + (kwargs.get('project', 'chromium'), self._bug_id_counter): + dict( + itertools.chain( + list(self._base_issue.items()), list(kwargs.items()), + [('id', self._bug_id_counter), + ('projectId', kwargs.get('project', 'chromium'))])) }) result = { - 'bug_id': self._bug_id_counter, + 'issue_id': self._bug_id_counter, 'project_id': kwargs.get('project', 'chromium') } self._bug_id_counter += 1 @@ -431,20 +453,38 @@ def AddBugComment(self, *args, **kwargs): 'state': ('closed' if kwargs.get('status') in {'WontFix', 'Fixed'} else 'open') }) + + # It was not fun to discover that these lines had to be added before components + # passed to perf_issue_service_client.PostIssueComment would show up as side + # effects at assertion time in unit tests. + if 'components' in kwargs: + components = kwargs.get('components') + self.issues.setdefault(issue_key, {}).update({ + 'components': components + }) + if 'labels' in kwargs and kwargs.get('labels') is not None: + labels = kwargs.get('labels') + existing_labels = self.issues.get(issue_key).get('labels') + issue_labels = set(labels) + if existing_labels is not None: + issue_labels = issue_labels | set(existing_labels) + self.issues.setdefault(issue_key, {}).update({'labels': issue_labels}) + if isinstance(labels, list): + labels.sort() self.calls.append({ 'method': 'AddBugComment', 'args': args, 'kwargs': kwargs, }) - def GetIssue(self, issue_id, project='chromium'): - return self.issues.get((project, issue_id)) + def GetIssue(self, issue_id, project_name='chromium'): + return self.issues.get((project_name, issue_id)) - def GetIssueComments(self, issue_id, project='chromium'): - return self.issue_comments.get((project, issue_id), []) + def GetIssueComments(self, issue_id, project_name='chromium'): + return self.issue_comments.get((project_name, issue_id), []) -class FakeSheriffConfigClient(object): +class FakeSheriffConfigClient: def __init__(self): self.patterns = {} @@ -458,7 +498,7 @@ def Match(self, path, **_): return [], None -class FakeCrrev(object): +class FakeCrrev: def __init__(self): self._response = None @@ -475,7 +515,7 @@ def GetNumbering(self, *args, **kwargs): return self._response -class FakePinpoint(object): +class FakePinpoint: def __init__(self): self.new_job_request = None @@ -493,7 +533,7 @@ def NewJob(self, request): return self._response -class FakeGitiles(object): +class FakeGitiles: def __init__(self, repo_commit_list=None): self._repo_commit_list = repo_commit_list or {} @@ -503,7 +543,7 @@ def CommitInfo(self, repo, revision): return self._repo_commit_list.get(repo, {}).get(revision, {}) -class FakeRevisionInfoClient(object): +class FakeRevisionInfoClient: def __init__(self, infos, revisions): self._infos = infos @@ -530,7 +570,7 @@ def GetRangeRevisionInfo(self, test_key, start, end): return infos -class FakeCASClient(object): +class FakeCASClient: _trees = {} _files = {} @@ -554,11 +594,40 @@ def GetTree(self, cas_ref, page_size=None, page_token=None): def BatchRead(self, cas_instance, digests): digests = [self._NormalizeDigest(d) for d in digests] + + def EncodeData(data): + return base64.b64encode(data.encode('utf-8')).decode() + return { 'responses': [{ - 'data': base64.encodestring( - self._files[cas_instance][(d['hash'], d['sizeBytes'])]), - 'digest': d, + 'data': + 
EncodeData(self._files[cas_instance][(d['hash'], + d['sizeBytes'])]), + 'digest': + d, 'status': {}, } for d in digests] } + + +class FakeCloudWorkflows: + + def __init__(self): + self.executions = {} + self.create_execution_called_with_anomaly = None + + def CreateExecution(self, anomaly): + self.create_execution_called_with_anomaly = anomaly + new_id = ('execution-id-%s' % len(self.executions)) + self.executions[new_id] = { + 'name': new_id, + 'state': 'ACTIVE', + 'result': None, + 'error': None, + } + return new_id + + def GetExecution(self, execution_id): + if execution_id not in self.executions: + raise request_service.NotFoundError('HTTP status code 404: NOT FOUND', '', '') + return self.executions[execution_id] diff --git a/dashboard/dashboard/common/timing.py b/dashboard/dashboard/common/timing.py index dcf41cb93ff..76d3c9d6ec3 100644 --- a/dashboard/dashboard/common/timing.py +++ b/dashboard/dashboard/common/timing.py @@ -6,12 +6,16 @@ from __future__ import division from __future__ import absolute_import -import gae_ts_mon +try: + import gae_ts_mon +except ImportError: + # When running unit tests, we need to import from infra_libs. + from infra_libs import ts_mon as gae_ts_mon import logging import time -class WallTimeLogger(object): +class WallTimeLogger: def __init__(self, label, description=''): """Initialize a context manager labeled `label` that measures the wall time @@ -46,7 +50,7 @@ def __exit__(self, *unused_args): class CpuTimeLogger(WallTimeLogger): def _Now(self): - return time.clock() + return time.process_time() def _Suffix(self): return 'cpu' diff --git a/dashboard/dashboard/common/utils.py b/dashboard/dashboard/common/utils.py index 4f196790429..77a867d1bf2 100644 --- a/dashboard/dashboard/common/utils.py +++ b/dashboard/dashboard/common/utils.py @@ -7,14 +7,18 @@ from __future__ import absolute_import import collections +import functools import logging import os +import random import re +import six +import six.moves.urllib.parse import time -import urllib from apiclient import discovery from apiclient import errors + from google.appengine.api import app_identity from google.appengine.api import memcache from google.appengine.api import oauth @@ -22,9 +26,8 @@ from google.appengine.api import urlfetch_errors from google.appengine.api import users from google.appengine.ext import ndb -import httplib2 -from oauth2client import client +from dashboard.common import oauth2_utils from dashboard.common import stored_object SHERIFF_DOMAINS_KEY = 'sheriff_domains_key' @@ -37,6 +40,12 @@ _DEFAULT_CUSTOM_METRIC_VAL = 1 OAUTH_SCOPES = ('https://www.googleapis.com/auth/userinfo.email',) OAUTH_ENDPOINTS = ['/api/', '/add_histograms', '/add_point', '/uploads'] +LEGACY_SERVICE_ACCOUNT = ('425761728072-pa1bs18esuhp2cp2qfa1u9vb6p1v6kfu' + '@developer.gserviceaccount.com') +ADC_SERVICE_ACCOUNT = 'chromeperf@appspot.gserviceaccount.com' +_CACHE_TIME = 60*60*2 # 2 hours +DELAY_REPORTING_PLACEHOLDER = 'Speed>Regressions' +DELAY_REPORTING_LABEL = 'Chromeperf-Delay-Reporting' _AUTOROLL_DOMAINS = ( 'chops-service-accounts.iam.gserviceaccount.com', @@ -64,15 +73,22 @@ def IsStale(self, ttl): _PINPOINT_REPO_EXCLUSION_TTL = 60 # seconds _PINPOINT_REPO_EXCLUSION_CACHED = _SimpleCache(0, None) +_STAGING_APP_ID = 'chromeperf-stage' def IsDevAppserver(): - return app_identity.get_application_id() == 'None' + try: + return app_identity.get_application_id() == 'None' + except AttributeError: + return False -def _GetNowRfc3339(): - """Returns the current time formatted per RFC 3339.""" - return 
time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) +def IsStagingEnvironment(): + """ Check if running in staging environment """ + try: + return app_identity.get_application_id() == _STAGING_APP_ID + except AttributeError: + return False def GetEmail(): @@ -87,7 +103,7 @@ def GetEmail(): OAuthRequestError: The request was not a valid OAuth request. OAuthServiceFailureError: An unknown error occurred. """ - request_uri = os.environ.get('REQUEST_URI', '') + request_uri = os.environ.get('PATH_INFO', '') if any(request_uri.startswith(e) for e in OAUTH_ENDPOINTS): # Prevent a CSRF whereby a malicious site posts an api request without an # Authorization header (so oauth.get_current_user() is None), but while the @@ -95,48 +111,16 @@ def GetEmail(): # return a non-None user. if 'HTTP_AUTHORIZATION' not in os.environ: # The user is not signed in. Avoid raising OAuthRequestError. + logging.info('Cannot get user email as the user is not signed in') return None user = oauth.get_current_user(OAUTH_SCOPES) else: - user = users.get_current_user() + user = GetGaeCurrentUser() return user.email() if user else None -@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, xg=True) -def TickMonitoringCustomMetric(metric_name): - """Increments the stackdriver custom metric with the given name. - - This is used for cron job monitoring; if these metrics stop being received - an alert mail is sent. For more information on custom metrics, see - https://cloud.google.com/monitoring/custom-metrics/using-custom-metrics - - Args: - metric_name: The name of the metric being monitored. - """ - credentials = client.GoogleCredentials.get_application_default() - monitoring = discovery.build('monitoring', 'v3', credentials=credentials) - now = _GetNowRfc3339() - project_id = stored_object.Get(_PROJECT_ID_KEY) - points = [{ - 'interval': { - 'startTime': now, - 'endTime': now, - }, - 'value': { - 'int64Value': _DEFAULT_CUSTOM_METRIC_VAL, - }, - }] - write_request = monitoring.projects().timeSeries().create( - name='projects/%s' % project_id, - body={ - 'timeSeries': [{ - 'metric': { - 'type': 'custom.googleapis.com/%s' % metric_name, - }, - 'points': points - }] - }) - write_request.execute() +def GetGaeCurrentUser(): + return users.GetCurrentUser() def TestPath(key): @@ -196,12 +180,13 @@ def TestMetadataKey(key_or_string): """ if key_or_string is None: return None - if isinstance(key_or_string, basestring): + if isinstance(key_or_string, six.string_types): return ndb.Key('TestMetadata', key_or_string) if key_or_string.kind() == 'TestMetadata': return key_or_string if key_or_string.kind() == 'Test': return ndb.Key('TestMetadata', TestPath(key_or_string)) + return None def OldStyleTestKey(key_or_string): @@ -218,12 +203,12 @@ def OldStyleTestKey(key_or_string): """ if key_or_string is None: return None - elif isinstance(key_or_string, ndb.Key) and key_or_string.kind() == 'Test': + if isinstance(key_or_string, ndb.Key) and key_or_string.kind() == 'Test': return key_or_string if (isinstance(key_or_string, ndb.Key) and key_or_string.kind() == 'TestMetadata'): key_or_string = key_or_string.id() - assert isinstance(key_or_string, basestring) + assert isinstance(key_or_string, six.string_types) path_parts = key_or_string.split('/') key_parts = ['Master', path_parts[0], 'Bot', path_parts[1]] for part in path_parts[2:]: @@ -297,7 +282,7 @@ def CmpPatterns(a, b): a_parts = a[0].split('/') b_parts = b[0].split('/') for a_part, b_part, test_part in reversed( - zip(a_parts, b_parts, test_path_parts)): + list(zip(a_parts, 
b_parts, test_path_parts))): # We favour a specific match over a partial match, and a partial # match over a catch-all * match. if a_part == b_part: @@ -316,7 +301,7 @@ def CmpPatterns(a, b): # 0 to indicate that we've found an equality. return 0 - matching_patterns.sort(cmp=CmpPatterns) # pylint: disable=using-cmp-argument + matching_patterns.sort(key=functools.cmp_to_key(CmpPatterns)) return matching_patterns[0][1] @@ -392,7 +377,7 @@ def _MatchesPatternPart(pattern_part, test_path_part): Returns: True if it matches, False otherwise. """ - if pattern_part == '*' or pattern_part == test_path_part: + if pattern_part in ('*', test_path_part): return True if '*' not in pattern_part: return False @@ -469,7 +454,7 @@ def MinimumRange(ranges): """Returns the intersection of the given ranges, or None.""" if not ranges: return None - starts, ends = zip(*ranges) + starts, ends = list(zip(*ranges)) start, end = (max(starts), min(ends)) if start > end: return None @@ -486,8 +471,11 @@ def IsInternalUser(): cached = GetCachedIsInternalUser(email) if cached is not None: return cached - is_internal_user = IsGroupMember(identity=email, group='chromeperf-access') - SetCachedIsInternalUser(email, is_internal_user) + try: + is_internal_user = IsGroupMember(identity=email, group='chromeperf-access') + SetCachedIsInternalUser(email, is_internal_user) + except GroupMemberAuthFailed: + return False return is_internal_user @@ -502,9 +490,12 @@ def IsAdministrator(email=None): cached = GetCachedIsAdministrator(email) if cached is not None: return cached - is_administrator = IsGroupMember( - identity=email, group='project-chromeperf-admins') - SetCachedIsAdministrator(email, is_administrator) + try: + is_administrator = IsGroupMember( + identity=email, group='project-chromeperf-admins') + SetCachedIsAdministrator(email, is_administrator) + except GroupMemberAuthFailed: + return False return is_administrator @@ -513,7 +504,7 @@ def GetCachedIsInternalUser(email): def SetCachedIsInternalUser(email, value): - memcache.set(_IsInternalUserCacheKey(email), value, time=60 * 60 * 24) + memcache.set(_IsInternalUserCacheKey(email), value, time=_CACHE_TIME) def GetCachedIsAdministrator(email): @@ -521,7 +512,7 @@ def GetCachedIsAdministrator(email): def SetCachedIsAdministrator(email, value): - memcache.set(_IsAdministratorUserCacheKey(email), value, time=60 * 60 * 24) + memcache.set(_IsAdministratorUserCacheKey(email), value, time=_CACHE_TIME) def _IsInternalUserCacheKey(email): @@ -546,8 +537,15 @@ def ShouldTurnOnUploadCompletionTokenExperiment(): email = GetEmail() if not email: return False - return IsGroupMember( - identity=email, group='project-chromeperf-upload-token-experiment') + try: + return IsGroupMember( + identity=email, group='project-chromeperf-upload-token-experiment') + except GroupMemberAuthFailed: + return False + + +class GroupMemberAuthFailed(Exception): + pass def IsGroupMember(identity, group): @@ -558,7 +556,10 @@ def IsGroupMember(identity, group): group: Group name. Returns: - True if confirmed to be a member, False otherwise. + True if user is a member, False otherwise. + + Raises: + GroupMemberAuthFailed: Failed to check if user is a member. 
""" cached = GetCachedIsGroupMember(identity, group) if cached is not None: @@ -577,8 +578,8 @@ def IsGroupMember(identity, group): SetCachedIsGroupMember(identity, group, is_member) return is_member except (errors.HttpError, KeyError, AttributeError) as e: - logging.error('Failed to check membership of %s: %s', identity, e) - return False + logging.error('Failed to check membership of %s: %s', identity, str(e)) + raise GroupMemberAuthFailed('Failed to authenticate user.') from e def GetCachedIsGroupMember(identity, group): @@ -587,41 +588,28 @@ def GetCachedIsGroupMember(identity, group): def SetCachedIsGroupMember(identity, group, value): memcache.set( - _IsGroupMemberCacheKey(identity, group), value, time=60 * 60 * 24) + _IsGroupMemberCacheKey(identity, group), value, time=_CACHE_TIME) def _IsGroupMemberCacheKey(identity, group): return 'is_group_member_%s_%s' % (identity, group) -@ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, xg=True) -def ServiceAccountEmail(scope=EMAIL_SCOPE): - account_details = stored_object.Get(SERVICE_ACCOUNT_KEY) - if not account_details: - raise KeyError('Service account credentials not found.') - - assert scope, "ServiceAccountHttp scope must not be None." - - return account_details['client_email'], +def ServiceAccountEmail(): + return ADC_SERVICE_ACCOUNT @ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, xg=True) def ServiceAccountHttp(scope=EMAIL_SCOPE, timeout=None): """Returns the Credentials of the service account if available.""" - account_details = stored_object.Get(SERVICE_ACCOUNT_KEY) - if not account_details: - raise KeyError('Service account credentials not found.') - assert scope, "ServiceAccountHttp scope must not be None." - client.logger.setLevel(logging.WARNING) - credentials = client.SignedJwtAssertionCredentials( - service_account_name=account_details['client_email'], - private_key=account_details['private_key'], - scope=scope) + import google_auth_httplib2 # pylint: disable=import-outside-toplevel - http = httplib2.Http(timeout=timeout) - credentials.authorize(http) + credentials = oauth2_utils.GetAppDefaultCredentials(scope) + http = google_auth_httplib2.AuthorizedHttp(credentials) + if timeout: + http.timeout = timeout return http @@ -640,14 +628,21 @@ def IsValidSheriffUser(): def IsTryjobUser(): email = GetEmail() - return bool(email) and IsGroupMember( - identity=email, group='project-pinpoint-tryjob-access') + try: + return bool(email) and IsGroupMember( + identity=email, group='project-pinpoint-tryjob-access') + except GroupMemberAuthFailed: + logging.info('User is not a member of project-pinpoint-tryjob-access.') + return False def IsAllowedToDelegate(email): - return bool(email) and IsGroupMember( - identity=email, - group='project-pinpoint-service-account-delegation-access') + try: + return bool(email) and IsGroupMember( + identity=email, + group='project-pinpoint-service-account-delegation-access') + except GroupMemberAuthFailed: + return False @ndb.transactional(propagation=ndb.TransactionOptions.INDEPENDENT, xg=True) @@ -742,7 +737,7 @@ def FetchURL(request_url, skip_status_code=False): Returns: Response object return by URL fetch, otherwise None when there's an error. 
""" - logging.info('URL being fetched: ' + request_url) + logging.info('URL being fetched: %s', request_url) try: response = urlfetch.fetch(request_url) except urlfetch_errors.DeadlineExceededError: @@ -755,7 +750,7 @@ def FetchURL(request_url, skip_status_code=False): return None if skip_status_code: return response - elif response.status_code != 200: + if response.status_code != 200: logging.error('ERROR %s checking %s', response.status_code, request_url) return None return response @@ -775,7 +770,7 @@ def GetBuildDetailsFromStdioLink(stdio_link): # This wasn't a buildbot formatted link. return no_details base_url, master, bot, buildnumber, step = m.groups() - bot = urllib.unquote(bot) + bot = six.moves.urllib.parse.unquote(bot) return base_url, master, bot, buildnumber, step @@ -812,3 +807,83 @@ def IsMonitored(sheriff_client, test_path): if subscriptions: return True return False + + +def GetBuildbucketUrl(build_id): + if build_id: + return 'https://ci.chromium.org/b/%s' % build_id + return '' + + +def RequestParamsMixed(req): + """ + Returns a dictionary where the values are either single + values, or a list of values when a key/value appears more than + once in this dictionary. This is similar to the kind of + dictionary often used to represent the variables in a web + request. + """ + result = {} + multi = {} + for key, value in req.values.items(True): + if key in result: + # We do this to not clobber any lists that are + # *actual* values in this dictionary: + if key in multi: + result[key].append(value) + else: + result[key] = [result[key], value] + multi[key] = None + else: + result[key] = value + return result + + +def SanitizeArgs(args, key_name, default): + if key_name not in args: + logging.warning( + '%s is not found in the query arguments. Using "%s" as default.', + key_name, default) + return default + value = args[key_name] + if value in ('', 'undefined'): + logging.warning('%s has %s as the value. Using "%s" as default.', key_name, + value, default) + return default + return value + + +def LogObsoleteHandlerUsage(handler, method): + class_name = type(handler).__name__ + logging.warning('Obsolete PY2 handler is called unexpectedly. %s:%s', + class_name, method) + + +def ConvertBytesBeforeJsonDumps(src): + """ convert a json object to safe to do json.dumps() + + During the python 3 migration, we have seen multiple cases that raw data + is loaded as part of a json object but in bytes. This will fail the + json.dumps() calls on this object. We want to convert all the bytes to + avoid this situation. + """ + + if isinstance(src, dict): + for k, v in src.items(): + if isinstance(v, bytes): + src[k] = six.ensure_str(v) + else: + src[k] = ConvertBytesBeforeJsonDumps(v) + elif isinstance(src, list): + for i, v in enumerate(src): + src[i] = ConvertBytesBeforeJsonDumps(v) + elif isinstance(src, bytes): + return six.ensure_str(src) + return src + + +def ShouldDelayIssueReporting(): + ''' Tells whether issue should not have the component/label/cc when created. + ''' + # At the beginning, we will randomly pick 50% of the issues. 
+ return random.randrange(2) == 0 diff --git a/dashboard/dashboard/common/utils_test.py b/dashboard/dashboard/common/utils_test.py index 415429dc6f2..05372ee468f 100644 --- a/dashboard/dashboard/common/utils_test.py +++ b/dashboard/dashboard/common/utils_test.py @@ -23,7 +23,7 @@ class UtilsTest(testing_common.TestCase): def setUp(self): - super(UtilsTest, self).setUp() + super().setUp() testing_common.SetIsInternalUser('internal@chromium.org', True) testing_common.SetIsInternalUser('foo@chromium.org', False) testing_common.SetIsAdministrator('admin@chromium.org', True) @@ -380,7 +380,7 @@ def testGetBuildDetailsFromStdioLink_DifferentBaseUrl(self): self.assertEqual('new_test', step) @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) - @mock.patch('common.utils.discovery.build') + @mock.patch('apiclient.discovery.build') def testIsGroupMember_PositiveCase(self, mock_discovery_build): mock_request = mock.MagicMock() mock_request.execute = mock.MagicMock(return_value={'is_member': True}) @@ -393,14 +393,15 @@ def testIsGroupMember_PositiveCase(self, mock_discovery_build): @mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock()) @mock.patch('logging.error') - @mock.patch('common.utils.discovery.build') + @mock.patch('apiclient.discovery.build') def testIsGroupMember_RequestFails_LogsErrorAndReturnsFalse( self, mock_discovery_build, mock_logging_error): mock_service = mock.MagicMock() mock_service.membership = mock.MagicMock( return_value={'error': 'Some error'}) mock_discovery_build.return_value = mock_service - self.assertFalse(utils.IsGroupMember('foo@bar.com', 'group')) + with self.assertRaises(utils.GroupMemberAuthFailed): + utils.IsGroupMember('foo@bar.com', 'group') self.assertEqual(1, mock_logging_error.call_count) def testGetSheriffForAutorollCommit_NotAutoroll_ReturnsNone(self): @@ -453,6 +454,7 @@ def testIsAdministrator(self): def testIsNotAdministrator(self): self.assertFalse(utils.IsAdministrator()) + @mock.patch.object(utils, 'IsGroupMember', mock.MagicMock(return_value=False)) @mock.patch.object(utils, 'GetEmail', mock.MagicMock(return_value='internal@chromium.org')) def testShouldTurnOnUploadCompletionTokenExperiment_NotGroupMember(self): @@ -481,6 +483,22 @@ def testIsMonitored_Negative(self): self.assertFalse(utils.IsMonitored(sheriff_client, 'test')) + def testConvertBytesBeforeJsonDumps(self): + none_obj = None + self.assertEqual(None, utils.ConvertBytesBeforeJsonDumps(none_obj), + 'Fail converting none object') + + dict_obj = {'a': b'aa', 'b': [b'bb', 'cc']} + self.assertEqual('{"a": "aa", "b": ["bb", "cc"]}', + json.dumps(utils.ConvertBytesBeforeJsonDumps(dict_obj)), + 'Fail converting dict object') + + list_obj = [{'a': b'aa', 'b': b'bb'}, {'c': b'cc'}] + self.assertEqual('[{"a": "aa", "b": "bb"}, {"c": "cc"}]', + json.dumps(utils.ConvertBytesBeforeJsonDumps(list_obj)), + 'Fail converting list object') + + def _MakeMockFetch(base64_encoded=True, status=200): """Returns a mock fetch object that returns a canned response.""" diff --git a/dashboard/dashboard/common/xsrf.py b/dashboard/dashboard/common/xsrf.py index bb8d50b75c4..ea7eb3babef 100644 --- a/dashboard/dashboard/common/xsrf.py +++ b/dashboard/dashboard/common/xsrf.py @@ -7,11 +7,13 @@ from __future__ import absolute_import import os +import six -from google.appengine.ext import ndb +from flask import abort, request -from oauth2client import xsrfutil +from google.appengine.ext import ndb +from dashboard.common import oauth2_utils from dashboard.common import utils @@ -22,17 +24,18 @@ class 
XsrfSecretKey(ndb.Model): def _ValidateToken(token, email): """Validates an XSRF token generated by GenerateXsrfToken.""" - return xsrfutil.validate_token( + return oauth2_utils.ValidateXsrfToken( _GetSecretKey(), token, user_id=email, action_id='') def GenerateToken(email): - return xsrfutil.generate_token(_GetSecretKey(), user_id=email, action_id='') + return oauth2_utils.GenerateXsrfToken( + _GetSecretKey(), user_id=email, action_id='') def _GenerateNewSecretKey(): """Returns a random XSRF secret key.""" - return str(os.urandom(16).encode('hex')) + return os.urandom(16).hex() def _GetSecretKey(): @@ -47,11 +50,11 @@ def _GetSecretKey(): def TokenRequired(handler_method): """A decorator to require that the XSRF token be validated for the handler.""" - def CheckToken(self, *args, **kwargs): + def CheckToken(*args, **kwargs): email = utils.GetEmail() - token = str(self.request.get('xsrf_token')) - if not email or not _ValidateToken(token, email): - self.abort(403) - handler_method(self, *args, **kwargs) + token = str(request.values.get('xsrf_token')) + if not email or not _ValidateToken(six.ensure_binary(token), email): + abort(403) + return handler_method(*args, **kwargs) return CheckToken diff --git a/dashboard/dashboard/common/xsrf_test.py b/dashboard/dashboard/common/xsrf_test.py index 84c1152403b..91f03894f9d 100644 --- a/dashboard/dashboard/common/xsrf_test.py +++ b/dashboard/dashboard/common/xsrf_test.py @@ -7,31 +7,28 @@ from __future__ import absolute_import import unittest - -import webapp2 import webtest +from flask import Flask from google.appengine.api import users -from dashboard.common import request_handler from dashboard.common import testing_common from dashboard.common import xsrf +flask_app = Flask(__name__) -class ExampleHandler(request_handler.RequestHandler): - """Example request handler that uses a XSRF token.""" - @xsrf.TokenRequired - def post(self): - pass +@flask_app.route('/example', methods=['POST']) +@xsrf.TokenRequired +def PostFlask(): + return '
<html>HTML</html>
' class XsrfTest(testing_common.TestCase): def setUp(self): - super(XsrfTest, self).setUp() - app = webapp2.WSGIApplication([('/example', ExampleHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.flask_testapp = webtest.TestApp(flask_app) def testGenerateToken_CanBeValidatedWithSameUser(self): self.SetCurrentUser('foo@bar.com') @@ -44,18 +41,19 @@ def testGenerateToken_CanNotBeValidatedWithDifferentUser(self): self.SetCurrentUser('foo@other.com', user_id='y') self.assertFalse(xsrf._ValidateToken(token, users.get_current_user())) + def testTokenRequired_NoToken_Returns403(self): - self.testapp.post('/example', {}, status=403) + self.flask_testapp.post('/example', {}, status=403) def testTokenRequired_BogusToken_Returns403(self): - self.testapp.post( + self.flask_testapp.post( '/example', {'xsrf_token': 'abcdefghijklmnopqrstuvwxyz0123456789'}, status=403) def testTokenRequired_CorrectToken_Success(self): self.SetCurrentUser('foo@bar.com') token = xsrf.GenerateToken(users.get_current_user()) - self.testapp.post('/example', {'xsrf_token': token}) + self.flask_testapp.post('/example', {'xsrf_token': token}) if __name__ == '__main__': diff --git a/dashboard/dashboard/create_health_report.py b/dashboard/dashboard/create_health_report.py deleted file mode 100644 index cdd7bef09af..00000000000 --- a/dashboard/dashboard/create_health_report.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -"""Provides the web interface for adding and editing sheriff rotations.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import logging -import json - -from google.appengine.api import users -from google.appengine.ext import ndb - -from dashboard.common import request_handler -from dashboard.common import utils -from dashboard.common import xsrf -from dashboard.models import table_config - - -class CreateHealthReportHandler(request_handler.RequestHandler): - - def get(self): - """Renders the UI with the form fields.""" - self.RenderStaticHtml('create_health_report.html') - - def post(self): - """POSTS the data to the datastore.""" - - user = users.get_current_user() - if not user: - self.response.out.write(json.dumps({'error': 'User not logged in.'})) - return - if not utils.IsInternalUser(): - self.response.out.write( - json.dumps({ - 'error': - 'Unauthorized access, please use chromium account to login.' 
- })) - return - - get_token = self.request.get('getToken') - get_table_config_list = self.request.get('getTableConfigList') - get_table_config_details = self.request.get('getTableConfigDetails') - if get_token == 'true': - values = {} - self.GetDynamicVariables(values) - self.response.out.write( - json.dumps({ - 'xsrf_token': values['xsrf_token'], - })) - elif get_table_config_list: - self._GetTableConfigList() - elif get_table_config_details: - self._GetTableConfigDetails(get_table_config_details) - else: - self._CreateTableConfig() - - def _GetTableConfigList(self): - query = table_config.TableConfig.query() - table_config_list = query.fetch(keys_only=True) - return_list = [] - for config in table_config_list: - return_list.append(config.id()) - self.response.out.write(json.dumps({ - 'table_config_list': return_list, - })) - - def _GetTableConfigDetails(self, config_name): - config_entity = ndb.Key('TableConfig', config_name).get() - if config_entity: - master_bot_list = [] - for bot in config_entity.bots: - master_bot_list.append(bot.parent().string_id() + '/' + bot.string_id()) - self.response.out.write( - json.dumps({ - 'table_name': config_name, - 'table_bots': master_bot_list, - 'table_tests': config_entity.tests, - 'table_layout': config_entity.table_layout - })) - else: - self.response.out.write(json.dumps({'error': 'Invalid config name.'})) - - def _CreateTableConfig(self): - """Creates a table config. Writes a valid name or an error message.""" - self._ValidateToken() - name = self.request.get('tableName') - master_bot = self.request.get('tableBots').splitlines() - tests = self.request.get('tableTests').splitlines() - table_layout = self.request.get('tableLayout') - override = int(self.request.get('override')) - user = users.get_current_user() - if not name or not master_bot or not tests or not table_layout or not user: - self.response.out.write( - json.dumps({'error': 'Please fill out the form entirely.'})) - return - - try: - created_table = table_config.CreateTableConfig( - name=name, - bots=master_bot, - tests=tests, - layout=table_layout, - username=user.email(), - override=override) - except table_config.BadRequestError as error: - self.response.out.write(json.dumps({ - 'error': error.message, - })) - logging.error('BadRequestError: %r', error.message) - return - - if created_table: - self.response.out.write(json.dumps({ - 'name': name, - })) - else: - self.response.out.write( - json.dumps({ - 'error': 'Could not create table.', - })) - logging.error('Could not create table.') - - def _ValidateToken(self): - user = users.get_current_user() - token = str(self.request.get('xsrf_token')) - if not user or not xsrf._ValidateToken(token, user): - self.abort(403) diff --git a/dashboard/dashboard/create_health_report_test.py b/dashboard/dashboard/create_health_report_test.py deleted file mode 100644 index b4f64f4a01a..00000000000 --- a/dashboard/dashboard/create_health_report_test.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import copy -import webapp2 -import webtest - -from google.appengine.ext import ndb -from google.appengine.api import users - -from dashboard import create_health_report -from dashboard.common import testing_common -from dashboard.common import xsrf -from dashboard.models import table_config -from dashboard.models import graph_data - -# Sample table config that contains all the required fields. -_SAMPLE_TABLE_CONFIG = { - 'tableName': 'my_sample_config', - 'tableBots': 'ChromiumPerf/win\nChromiumPerf/linux', - 'tableTests': 'my_test_suite/my_test\nmy_test_suite/my_other_test', - 'tableLayout': '{"my_test_suite/my_test": ["Foreground", ' - '"Pretty Name 1"], "my_test_suite/my_other_test": ' - '["Foreground", "Pretty Name 2"]}', - 'override': '0', -} - -_ALT_SAMPLE_TABLE_CONFIG = { - 'tableName': 'my_other_config', - 'tableBots': 'ChromiumPerf/win\nChromiumPerf/linux', - 'tableTests': 'my_test_suite/my_test\nmy_test_suite/my_other_test', - 'tableLayout': '{"my_test_suite/my_test": ["Foreground", ' - '"Pretty Name 1"], "my_test_suite/my_other_test": ' - '["Foreground", "Pretty Name 2"]}', - 'override': '0', -} - - -class CreateHealthReportTest(testing_common.TestCase): - - def setUp(self): - super(CreateHealthReportTest, self).setUp() - app = webapp2.WSGIApplication([ - ('/create_health_report', - create_health_report.CreateHealthReportHandler) - ]) - self.testapp = webtest.TestApp(app) - testing_common.SetSheriffDomains(['chromium.org']) - testing_common.SetIsInternalUser('internal@chromium.org', True) - self.SetCurrentUser('internal@chromium.org', is_admin=True) - - def tearDown(self): - super(CreateHealthReportTest, self).tearDown() - self.UnsetCurrentUser() - - def _AddInternalBotsToDataStore(self): - """Adds sample bot/master pairs.""" - self._AddTests() - bots = graph_data.Bot.query().fetch() - for bot in bots: - bot.internal_only = True - bot.put() - - def _AddMixedBotsToDataStore(self): - """Adds sample bot/master pairs.""" - self._AddTests() - bots = graph_data.Bot.query().fetch() - bots[1].internal_only = True - bots[1].put() - - def _AddPublicBotsToDataStore(self): - """Adds sample bot/master pairs.""" - self._AddTests() - - def _AddTests(self): - testing_common.AddTests( - ['ChromiumPerf'], ['win', 'linux'], - {'my_test_suite': { - 'my_test': {}, - 'my_other_test': {}, - }}) - - def testPost_NoXSRFToken_Returns403Error(self): - self.testapp.post('/create_health_report', {}, status=403) - query = table_config.TableConfig.query() - table_values = query.fetch() - self.assertEqual(len(table_values), 0) - - def testPost_GetXsrfToken(self): - response = self.testapp.post('/create_health_report', { - 'getToken': 'true', - }) - self.assertIn(xsrf.GenerateToken(users.get_current_user()), response) - - def testGet_ShowPage(self): - response = self.testapp.get('/create_health_report') - self.assertIn('create-health-report-page', response) - - def testPost_ExternalUserReturnsNotLoggedIn(self): - self.UnsetCurrentUser() - response = self.testapp.post('/create_health_report', {}) - self.assertIn('User not logged in.', response) - - def testPost_ValidData(self): - self._AddInternalBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - response = self.testapp.post('/create_health_report', config) - self.assertIn('my_sample_config', response) - table_entity = ndb.Key('TableConfig', 
'my_sample_config').get() - self.assertTrue(table_entity.internal_only) - self.assertEqual('internal@chromium.org', table_entity.username) - self.assertEqual(['my_test_suite/my_test', 'my_test_suite/my_other_test'], - table_entity.tests) - master_key = ndb.Key('Master', 'ChromiumPerf') - win_bot = graph_data.Bot( - id='win', parent=master_key, internal_only=False).key - linux_bot = graph_data.Bot( - id='linux', parent=master_key, internal_only=False).key - bots = [win_bot, linux_bot] - self.assertEqual(bots, table_entity.bots) - self.assertEqual( - '{"my_test_suite/my_test": ["Foreground", "Pretty Name 1"], ' - '"my_test_suite/my_other_test": ["Foreground", "Pretty Name 2"]}', - table_entity.table_layout) - - def testPost_EmptyForm(self): - response = self.testapp.post('/create_health_report', { - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - 'override': 0, - }) - self.assertIn('Please fill out the form entirely.', response) - query = table_config.TableConfig.query() - table_values = query.fetch() - self.assertEqual(len(table_values), 0) - - def testPost_TwoPostsSameNameReturnsError(self): - self._AddInternalBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - self.testapp.post('/create_health_report', config) - response = self.testapp.post('/create_health_report', config) - self.assertIn('my_sample_config already exists.', response) - - def testPost_InvalidBots(self): - self._AddInternalBotsToDataStore() - response = self.testapp.post( - '/create_health_report', { - 'tableName': 'myName', - 'tableBots': 'garbage/moarGarbage', - 'tableTests': 'my_test_suite/my_test', - 'tableLayout': '{"Alayout":"isHere"}', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - 'override': 0, - }) - self.assertIn('Invalid Master/Bot: garbage/moarGarbage', response) - query = table_config.TableConfig.query() - table_values = query.fetch() - self.assertEqual(len(table_values), 0) - - def testPost_InternalOnlyAndPublicBots(self): - self._AddMixedBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - self.testapp.post('/create_health_report', config) - table_entity = ndb.Key('TableConfig', 'my_sample_config').get() - self.assertTrue(table_entity.internal_only) - - def testPost_PublicOnlyBots(self): - self._AddPublicBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - self.testapp.post('/create_health_report', config) - table_entity = ndb.Key('TableConfig', 'my_sample_config').get() - self.assertFalse(table_entity.internal_only) - - def testPost_ValidBotsBadLayout(self): - self._AddPublicBotsToDataStore() - response = self.testapp.post( - '/create_health_report', { - 'tableName': 'myName', - 'tableBots': 'ChromiumPerf/linux', - 'tableTests': 'my_test_suite/my_test', - 'tableLayout': 'garbage', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - 'override': 0, - }) - self.assertIn('Invalid JSON for table layout', response) - query = table_config.TableConfig.query() - table_values = query.fetch() - self.assertEqual(len(table_values), 0) - - def testPost_InvalidTests(self): - self._AddInternalBotsToDataStore() - response = self.testapp.post( - '/create_health_report', { - 'tableName': 'myName', - 'tableBots': 'ChromiumPerf/linux', - 'tableTests': 'someTests', - 'tableLayout': '{"Alayout":"isHere"}', - 'xsrf_token': 
xsrf.GenerateToken(users.get_current_user()), - 'override': 0, - }) - self.assertIn('someTests is not a valid test.', response) - query = table_config.TableConfig.query() - table_values = query.fetch() - self.assertEqual(len(table_values), 0) - - def testPost_GetTableConfigList(self): - self._AddInternalBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - alt_config = copy.deepcopy(_ALT_SAMPLE_TABLE_CONFIG) - alt_config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - self.testapp.post('/create_health_report', config) - self.testapp.post('/create_health_report', alt_config) - - response = self.testapp.post('/create_health_report', { - 'getTableConfigList': True, - }) - return_list = self.GetJsonValue(response, 'table_config_list') - query = table_config.TableConfig.query() - all_configs = query.fetch(keys_only=True) - for config in all_configs: - self.assertIn(config.id(), return_list) - - def testPost_GetTableConfigDetailsForEdit(self): - self._AddInternalBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - - self.testapp.post('/create_health_report', config) - - response = self.testapp.post('/create_health_report', { - 'getTableConfigDetails': 'my_sample_config', - }) - # Similar to the valid data test, ensure everything is correct. - self.assertIn('my_sample_config', response) - table_entity = ndb.Key('TableConfig', 'my_sample_config').get() - self.assertTrue(table_entity.internal_only) - self.assertEqual('internal@chromium.org', table_entity.username) - self.assertEqual(['my_test_suite/my_test', 'my_test_suite/my_other_test'], - table_entity.tests) - master_key = ndb.Key('Master', 'ChromiumPerf') - win_bot = graph_data.Bot( - id='win', parent=master_key, internal_only=False).key - linux_bot = graph_data.Bot( - id='linux', parent=master_key, internal_only=False).key - bots = [win_bot, linux_bot] - self.assertEqual(bots, table_entity.bots) - self.assertEqual( - '{"my_test_suite/my_test": ["Foreground", "Pretty Name 1"], ' - '"my_test_suite/my_other_test": ["Foreground", "Pretty Name 2"]}', - table_entity.table_layout) - - def testPost_TwoPostsSameNameAsEdit(self): - self._AddInternalBotsToDataStore() - config = copy.deepcopy(_SAMPLE_TABLE_CONFIG) - config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user()) - self.testapp.post('/create_health_report', config) - - config['override'] = 1 - config['tableLayout'] = ( - '{"my_test_suite/my_test": ["Foreground", "New Name 1"], ' - '"my_test_suite/my_other_test": ["Foreground", "New Name 2"]}') - response = self.testapp.post('/create_health_report', config) - - self.assertIn('my_sample_config', response) - self.assertNotIn('already exists.', response) - table_entity = ndb.Key('TableConfig', 'my_sample_config').get() - self.assertIn('my_test_suite/my_test": ["Foreground", "New Name 1"]', - table_entity.table_layout) - self.assertIn('my_test_suite/my_other_test": ["Foreground", "New Name 2"]', - table_entity.table_layout) diff --git a/dashboard/dashboard/dispatcher.py b/dashboard/dashboard/dispatcher.py index 48610c8acbb..ba3f97a6760 100644 --- a/dashboard/dashboard/dispatcher.py +++ b/dashboard/dashboard/dispatcher.py @@ -6,139 +6,351 @@ from __future__ import division from __future__ import absolute_import -import gae_ts_mon -import webapp2 +from flask import Flask, request as flask_request, make_response +import logging + +from google.appengine.api 
import wrap_wsgi_app +import google.cloud.logging +try: + import googleclouddebugger + googleclouddebugger.enable(breakpoint_enable_canary=True) +except ImportError: + pass from dashboard import add_histograms from dashboard import add_histograms_queue from dashboard import add_point from dashboard import add_point_queue -from dashboard import alerts from dashboard import alert_groups +from dashboard import alerts from dashboard import associate_alerts -from dashboard import bug_details from dashboard import buildbucket_job_status -from dashboard import create_health_report from dashboard import dump_graph_json from dashboard import edit_anomalies -from dashboard import edit_anomaly_configs -from dashboard import edit_bug_labels from dashboard import edit_site_config from dashboard import file_bug -from dashboard import get_diagnostics -from dashboard import get_histogram from dashboard import graph_csv from dashboard import graph_json from dashboard import graph_revisions from dashboard import group_report -from dashboard import jstsmon from dashboard import layered_cache_delete_expired from dashboard import list_tests from dashboard import load_from_prod from dashboard import main -from dashboard import mark_recovered_alerts -from dashboard import memory_report from dashboard import migrate_test_names +from dashboard import migrate_test_names_tasks +from dashboard import mark_recovered_alerts from dashboard import navbar -from dashboard import oauth2_decorator from dashboard import pinpoint_request -from dashboard import put_entities_task from dashboard import report from dashboard import sheriff_config_poller from dashboard import short_uri -from dashboard import speed_releasing from dashboard import update_dashboard_stats -from dashboard import update_test_suite_descriptors from dashboard import update_test_suites +from dashboard import update_test_suite_descriptors from dashboard import uploads_info from dashboard.api import alerts as api_alerts -from dashboard.api import bugs from dashboard.api import config from dashboard.api import describe -from dashboard.api import list_timeseries -from dashboard.api import new_bug -from dashboard.api import new_pinpoint -from dashboard.api import existing_bug -from dashboard.api import nudge_alert -from dashboard.api import report_generate -from dashboard.api import report_names -from dashboard.api import report_template from dashboard.api import test_suites -from dashboard.api import timeseries from dashboard.api import timeseries2 +from dashboard.common import datastore_hooks + +google.cloud.logging.Client().setup_logging(log_level=logging.DEBUG) +logging.getLogger("urllib3").setLevel(logging.INFO) + +datastore_hooks.InstallHooks() + +flask_app = Flask(__name__) + +flask_app.wsgi_app = wrap_wsgi_app(flask_app.wsgi_app, use_deferred=True) + + +@flask_app.route('/') +def MainHandlerGet(): + return main.MainHandlerGet() + + +@flask_app.route('/add_histograms', methods=['POST']) +def AddHistogramsPost(): + return add_histograms.AddHistogramsPost() + + +@flask_app.route('/add_histograms/process', methods=['POST']) +def AddHistogramsProcessPost(): + return add_histograms.AddHistogramsProcessPost() + + +@flask_app.route('/add_histograms_queue', methods=['GET', 'POST']) +def AddHistogramsQueuePost(): + return add_histograms_queue.AddHistogramsQueuePost() + + +@flask_app.route('/add_point', methods=['POST']) +def AddPointPost(): + return add_point.AddPointPost() + + +@flask_app.route('/add_point_queue', methods=['GET', 'POST']) +def 
AddPointQueuePost(): + return add_point_queue.AddPointQueuePost() + + +@flask_app.route('/alert_groups_update') +def AlertGroupsGet(): + return alert_groups.AlertGroupsGet() + + +@flask_app.route('/alerts', methods=['GET']) +def AlertsHandlerGet(): + return alerts.AlertsHandlerGet() + + +@flask_app.route('/alerts', methods=['POST']) +def AlertsHandlerPost(): + return alerts.AlertsHandlerPost() + + +@flask_app.route('/associate_alerts', methods=['GET', 'POST']) +def AssociateAlertsHandlerPost(): + return associate_alerts.AssociateAlertsHandlerPost() + + +@flask_app.route('/api/alerts', methods=['POST', 'OPTIONS']) +def AlertsPost(): + return api_alerts.AlertsPost() + + +@flask_app.route('/api/config', methods=['POST']) +def ConfigHandlerPost(): + return config.ConfigHandlerPost() + + +@flask_app.route('/api/describe', methods=['POST', 'OPTIONS']) +def DescribePost(): + return describe.DescribePost() + + +@flask_app.route('/api/test_suites', methods=['POST', 'OPTIONS']) +def TestSuitesPost(): + return test_suites.TestSuitesPost() + + +@flask_app.route('/api/timeseries2', methods=['POST']) +def TimeSeries2Post(): + return timeseries2.TimeSeries2Post() + + +@flask_app.route('/buildbucket_job_status/') +def BuildbucketJobStatusGet(job_id): + return buildbucket_job_status.BuildbucketJobStatusGet(job_id) + + +@flask_app.route('/delete_expired_entities') +def LayeredCacheDeleteExpiredGet(): + return layered_cache_delete_expired.LayeredCacheDeleteExpiredGet() + + +@flask_app.route('/dump_graph_json', methods=['GET']) +def DumpGraphJsonHandler(): + return dump_graph_json.DumpGraphJsonHandlerGet() + + +@flask_app.route('/edit_anomalies', methods=['POST']) +def EditAnomaliesPost(): + return edit_anomalies.EditAnomaliesPost() + + +@flask_app.route('/edit_site_config', methods=['GET']) +def EditSiteConfigHandlerGet(): + return edit_site_config.EditSiteConfigHandlerGet() + + +@flask_app.route('/edit_site_config', methods=['POST']) +def EditSiteConfigHandlerPost(): + return edit_site_config.EditSiteConfigHandlerPost() + + +@flask_app.route('/file_bug', methods=['GET', 'POST']) +def FileBugHandlerGet(): + return file_bug.FileBugHandlerGet() + + +@flask_app.route('/graph_csv', methods=['GET']) +def GraphCSVHandlerGet(): + return graph_csv.GraphCSVGet() + + +@flask_app.route('/graph_csv', methods=['POST']) +def GraphCSVHandlerPost(): + return graph_csv.GraphCSVPost() + + +@flask_app.route('/graph_json', methods=['POST']) +def GraphJsonPost(): + return graph_json.GraphJsonPost() + + +@flask_app.route('/graph_revisions', methods=['POST']) +def GraphRevisionsPost(): + return graph_revisions.GraphRevisionsPost() + + +@flask_app.route('/group_report', methods=['GET']) +def GroupReportGet(): + return group_report.GroupReportGet() + + +@flask_app.route('/group_report', methods=['POST']) +def GroupReportPost(): + return group_report.GroupReportPost() + + +@flask_app.route('/list_tests', methods=['POST']) +def ListTestsHandlerPost(): + return list_tests.ListTestsHandlerPost() + + +@flask_app.route('/load_from_prod', methods=['GET', 'POST']) +def LoadFromProdHandler(): + return load_from_prod.LoadFromProdHandlerGetPost() + + +@flask_app.route('/mark_recovered_alerts', methods=['GET', 'POST']) +def MarkRecoveredAlertsPost(): + return mark_recovered_alerts.MarkRecoveredAlertsPost() + + +@flask_app.route('/migrate_test_names', methods=['GET']) +def MigrateTestNamesGet(): + return migrate_test_names.MigrateTestNamesGet() + + +@flask_app.route('/migrate_test_names', methods=['POST']) +def MigrateTestNamesPost(): + return 
migrate_test_names.MigrateTestNamesPost() + + +@flask_app.route('/migrate_test_names_tasks', methods=['POST']) +def MigrateTestNamesTasksPost(): + return migrate_test_names_tasks.MigrateTestNamesTasksPost() + + +@flask_app.route('/navbar', methods=['POST']) +def NavbarHandlerPost(): + return navbar.NavbarHandlerPost() + + +@flask_app.route('/pinpoint/new/bisect', methods=['POST']) +def PinpointNewBisectPost(): + return pinpoint_request.PinpointNewBisectPost() + + +@flask_app.route('/pinpoint/new/perf_try', methods=['POST']) +def PinpointNewPerfTryPost(): + return pinpoint_request.PinpointNewPerfTryPost() + + +@flask_app.route('/pinpoint/new/prefill', methods=['POST']) +def PinpointNewPrefillPost(): + return pinpoint_request.PinpointNewPrefillPost() + + +@flask_app.route('/configs/update') +def SheriffConfigPollerGet(): + return sheriff_config_poller.SheriffConfigPollerGet() + + +@flask_app.route('/report', methods=['GET']) +def ReportHandlerGet(): + return report.ReportHandlerGet() + + +@flask_app.route('/report', methods=['POST']) +def ReportHandlerPost(): + return report.ReportHandlerPost() + + +@flask_app.route('/short_uri', methods=['GET']) +def ShortUriHandlerGet(): + return short_uri.ShortUriHandlerGet() + + +@flask_app.route('/short_uri', methods=['POST']) +def ShortUriHandlerPost(): + return short_uri.ShortUriHandlerPost() + + +@flask_app.route('/update_dashboard_stats') +def UpdateDashboardStatsGet(): + return update_dashboard_stats.UpdateDashboardStatsGet() + + +@flask_app.route('/update_test_suites', methods=['GET','POST']) +def UpdateTestSuitesPost(): + return update_test_suites.UpdateTestSuitesPost() + + +@flask_app.route('/update_test_suite_descriptors', methods=['GET', 'POST']) +def UpdateTestSuitesDescriptorsPost(): + return update_test_suite_descriptors.UpdateTestSuiteDescriptorsPost() + + +@flask_app.route('/uploads/') +def UploadsInfoGet(token_id): + return uploads_info.UploadsInfoGet(token_id) + + +# Some handlers were identified as obsolete during the python 3 migration and +# thus were deleted. Though, we want to be aware of any client calls to those +# deleted endpoints in the future by adding logs here. 
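# As a sketch, an endpoint retired in the future can reuse this catch-all by
# stacking one more route decorator on ObsoleteEndpointsHandler below (the path
# here is hypothetical):
#
#   @flask_app.route('/some_removed_page', endpoint='/some_removed_page',
#                    methods=['GET', 'POST'])
#
# so that stray client calls get logged before the 404 response is returned.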
+@flask_app.route( + '/bug_details', endpoint='/bug_details', methods=['GET', 'POST']) +@flask_app.route( + '/create_health_report', + endpoint='/create_health_report', + methods=['GET', 'POST']) +@flask_app.route( + '/get_diagnostics', endpoint='/get_diagnostics', methods=['POST']) +@flask_app.route('/get_histogram', endpoint='/get_histogram', methods=['POST']) +@flask_app.route( + '/put_entities_task', endpoint='/put_entities_task', methods=['POST']) +@flask_app.route( + '/speed_releasing', endpoint='/speed_releasing', methods=['GET', 'POST']) +@flask_app.route('/api/bugs/', endpoint='/api/bugs', methods=['POST']) +@flask_app.route( + '/api/bugs/p//', + endpoint='/api/bugs/p', + methods=['POST']) +@flask_app.route( + '/api/existing_bug', endpoint='/api/existing_bug', methods=['POST']) +@flask_app.route( + '/api/list_timeseries/', endpoint='/api/list_timeseries', methods=['POST']) +@flask_app.route('/api/new_bug', endpoint='/api/new_bug', methods=['POST']) +@flask_app.route( + '/api/new_pinpoint', endpoint='/api/new_pinpoint', methods=['POST']) +@flask_app.route( + '/api/nudge_alert', endpoint='/api/nudge_alert', methods=['POST']) +@flask_app.route( + '/api/report/generate', endpoint='/api/report/generate', methods=['POST']) +@flask_app.route( + '/api/report/names', endpoint='/api/report/names', methods=['POST']) +@flask_app.route( + '/api/report/template', endpoint='/api/report/template', methods=['POST']) +@flask_app.route( + '/api/timeseries/', endpoint='/api/timeseries', methods=['POST']) +def ObsoleteEndpointsHandler(bug_id=None, project_id=None): + del bug_id, project_id + obsolete_endpoint = flask_request.endpoint + logging.error( + 'Request on deleted endpoint: %s. ' + 'It was considered obsolete in Python 3 migration.', obsolete_endpoint) + + return make_response( + 'This endpoint is obsolete: %s. ' + 'Please contact browser-perf-engprod@google.com for more info.' 
% + obsolete_endpoint, 404) + -_URL_MAPPING = [ - ('/_/jstsmon', jstsmon.JsTsMonHandler), - ('/add_histograms', add_histograms.AddHistogramsHandler), - ('/add_histograms/process', add_histograms.AddHistogramsProcessHandler), - ('/add_histograms_queue', add_histograms_queue.AddHistogramsQueueHandler), - ('/add_point', add_point.AddPointHandler), - ('/add_point_queue', add_point_queue.AddPointQueueHandler), - ('/alerts', alerts.AlertsHandler), - (r'/api/alerts', api_alerts.AlertsHandler), - (r'/api/bugs/p/(.+)/(.+)', bugs.BugsWithProjectHandler), - (r'/api/bugs/(.*)', bugs.BugsHandler), - (r'/api/config', config.ConfigHandler), - (r'/api/describe', describe.DescribeHandler), - (r'/api/list_timeseries/(.*)', list_timeseries.ListTimeseriesHandler), - (r'/api/new_bug', new_bug.NewBugHandler), - (r'/api/new_pinpoint', new_pinpoint.NewPinpointHandler), - (r'/api/existing_bug', existing_bug.ExistingBugHandler), - (r'/api/nudge_alert', nudge_alert.NudgeAlertHandler), - (r'/api/report/generate', report_generate.ReportGenerateHandler), - (r'/api/report/names', report_names.ReportNamesHandler), - (r'/api/report/template', report_template.ReportTemplateHandler), - (r'/api/test_suites', test_suites.TestSuitesHandler), - (r'/api/timeseries/(.*)', timeseries.TimeseriesHandler), - (r'/api/timeseries2', timeseries2.Timeseries2Handler), - ('/associate_alerts', associate_alerts.AssociateAlertsHandler), - ('/alert_groups_update', alert_groups.AlertGroupsHandler), - ('/bug_details', bug_details.BugDetailsHandler), - (r'/buildbucket_job_status/(\d+)', - buildbucket_job_status.BuildbucketJobStatusHandler), - ('/create_health_report', create_health_report.CreateHealthReportHandler), - ('/configs/update', sheriff_config_poller.ConfigsUpdateHandler), - ('/delete_expired_entities', - layered_cache_delete_expired.LayeredCacheDeleteExpiredHandler), - ('/dump_graph_json', dump_graph_json.DumpGraphJsonHandler), - ('/edit_anomalies', edit_anomalies.EditAnomaliesHandler), - ('/edit_anomaly_configs', edit_anomaly_configs.EditAnomalyConfigsHandler), - ('/edit_bug_labels', edit_bug_labels.EditBugLabelsHandler), - ('/edit_site_config', edit_site_config.EditSiteConfigHandler), - ('/file_bug', file_bug.FileBugHandler), - ('/get_diagnostics', get_diagnostics.GetDiagnosticsHandler), - ('/get_histogram', get_histogram.GetHistogramHandler), - ('/graph_csv', graph_csv.GraphCsvHandler), - ('/graph_json', graph_json.GraphJsonHandler), - ('/graph_revisions', graph_revisions.GraphRevisionsHandler), - ('/group_report', group_report.GroupReportHandler), - ('/list_tests', list_tests.ListTestsHandler), - ('/load_from_prod', load_from_prod.LoadFromProdHandler), - ('/', main.MainHandler), - ('/mark_recovered_alerts', - mark_recovered_alerts.MarkRecoveredAlertsHandler), - ('/memory_report', memory_report.MemoryReportHandler), - ('/migrate_test_names', migrate_test_names.MigrateTestNamesHandler), - ('/navbar', navbar.NavbarHandler), - ('/pinpoint/new/bisect', pinpoint_request.PinpointNewBisectRequestHandler), - ('/pinpoint/new/perf_try', - pinpoint_request.PinpointNewPerfTryRequestHandler), - ('/pinpoint/new/prefill', - pinpoint_request.PinpointNewPrefillRequestHandler), - ('/put_entities_task', put_entities_task.PutEntitiesTaskHandler), - ('/report', report.ReportHandler), - ('/short_uri', short_uri.ShortUriHandler), - (r'/speed_releasing/(.*)', speed_releasing.SpeedReleasingHandler), - ('/speed_releasing', speed_releasing.SpeedReleasingHandler), - ('/update_dashboard_stats', - update_dashboard_stats.UpdateDashboardStatsHandler), - 
('/update_test_suites', update_test_suites.UpdateTestSuitesHandler), - ('/update_test_suite_descriptors', - update_test_suite_descriptors.UpdateTestSuiteDescriptorsHandler), - ('/uploads/(.+)', uploads_info.UploadInfoHandler), - (oauth2_decorator.DECORATOR.callback_path, - oauth2_decorator.DECORATOR.callback_handler()) -] - -APP = webapp2.WSGIApplication(_URL_MAPPING, debug=False) -gae_ts_mon.initialize(APP) +def APP(environ, request): + return flask_app(environ, request) diff --git a/dashboard/dashboard/dispatcher_test.py b/dashboard/dashboard/dispatcher_test.py index bd43cd12650..927fee82b6e 100644 --- a/dashboard/dashboard/dispatcher_test.py +++ b/dashboard/dashboard/dispatcher_test.py @@ -14,13 +14,14 @@ class DispatcherTest(unittest.TestCase): def testImport(self): # load_from_prod requires this: - os.environ['APPLICATION_ID'] = 'test-dot-chromeperf' + os.environ['APPLICATION_ID'] = 'testbed-test' # gae_ts_mon requires these: os.environ['CURRENT_MODULE_ID'] = '' os.environ['CURRENT_VERSION_ID'] = '' - from dashboard import dispatcher + # pylint: disable=import-outside-toplevel + from dashboard import dispatcher # pylint: disable=unused-import if __name__ == '__main__': diff --git a/dashboard/dashboard/dump_graph_json.py b/dashboard/dashboard/dump_graph_json.py index 1d431969007..5e5fbbe6587 100644 --- a/dashboard/dashboard/dump_graph_json.py +++ b/dashboard/dashboard/dump_graph_json.py @@ -11,6 +11,7 @@ from __future__ import absolute_import import base64 +from flask import request, make_response import json from google.appengine.ext import ndb @@ -27,136 +28,140 @@ _DEFAULT_MAX_ANOMALIES = 30 -class DumpGraphJsonHandler(request_handler.RequestHandler): - """Handler for extracting entities from datastore.""" - - def get(self): - """Handles dumping dashboard data.""" - if self.request.get('sheriff'): - self._DumpAnomalyDataForSheriff() - elif self.request.get('test_path'): - self._DumpTestData() - else: - self.ReportError('No parameters specified.') - - def _DumpTestData(self): - """Dumps data for the requested test. - - Request parameters: - test_path: A single full test path, including master/bot. - num_points: Max number of Row entities (optional). - end_rev: Ending revision number, inclusive (optional). - - Outputs: - JSON array of encoded protobuf messages, which encode all of - the datastore entities relating to one test (including Master, Bot, - TestMetadata, Row, Anomaly and Sheriff entities). - """ - test_path = self.request.get('test_path') - num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS)) - end_rev = self.request.get('end_rev') - test_key = utils.TestKey(test_path) - if not test_key or test_key.kind() != 'TestMetadata': - # Bad test_path passed in. - self.response.out.write(json.dumps([])) - return - - # List of datastore entities that will be dumped. - entities = [] - - entities.extend(self._GetTestAncestors([test_key])) - - # Get the Row entities. 
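# For reference, a request such as the following (host and values illustrative)
# exercises the test_path branch of the new handler below; 'sheriff',
# 'num_points', 'num_alerts' and 'end_rev' are the other parameters it reads:
#
#   curl 'https://chromeperf.appspot.com/dump_graph_json?test_path=M/b/suite/metric&num_points=100'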
+def DumpGraphJsonHandlerGet(): + if request.values.get('sheriff'): + sheriff_name = request.values.get('sheriff') + num_points = int(request.values.get('num_points', _DEFAULT_MAX_POINTS)) + num_anomalies = int( + request.values.get('num_alerts', _DEFAULT_MAX_ANOMALIES)) + + protobuf_json = _DumpAnomalyDataForSheriff(sheriff_name, num_points, + num_anomalies) + + return make_response(protobuf_json) + if request.values.get('test_path'): + test_path = request.values.get('test_path') + num_points = int(request.values.get('num_points', _DEFAULT_MAX_POINTS)) + end_rev = request.values.get('end_rev') + + protobuf_json = _DumpTestData(test_path, num_points, end_rev) + + return make_response(protobuf_json) + return request_handler.RequestHandlerReportError('No parameters specified.') + + +def _DumpTestData(test_path, num_points, end_rev): + """Dumps data for the requested test. + + Request parameters: + test_path: A single full test path, including master/bot. + num_points: Max number of Row entities (optional). + end_rev: Ending revision number, inclusive (optional). + + Outputs: + JSON array of encoded protobuf messages, which encode all of + the datastore entities relating to one test (including Master, Bot, + TestMetadata, Row, Anomaly and Sheriff entities). + """ + test_key = utils.TestKey(test_path) + if not test_key or test_key.kind() != 'TestMetadata': + # Bad test_path passed in. + return json.dumps([]) + + # List of datastore entities that will be dumped. + entities = [] + + entities.extend(_GetTestAncestors([test_key])) + + # Get the Row entities. + q = graph_data.Row.query() + q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) + if end_rev: + q = q.filter(graph_data.Row.revision <= int(end_rev)) + q = q.order(-graph_data.Row.revision) # pylint: disable=invalid-unary-operand-type + entities += q.fetch(limit=num_points) + + # Get the Anomaly and Sheriff entities. + alerts, _, _ = anomaly.Anomaly.QueryAsync(test=test_key).get_result() + subscriptions = [s for a in alerts for s in a.subscriptions] + entities += alerts + entities += subscriptions + + # Convert the entities to protobuf message strings and output as JSON. + protobuf_strings = list(map(EntityToBinaryProtobuf, entities)) + return json.dumps(protobuf_strings) + + +def _DumpAnomalyDataForSheriff(sheriff_name, num_points, num_anomalies): + """Dumps Anomaly data for all sheriffs. + + Request parameters: + sheriff: Sheriff name. + num_points: Max number of Row entities (optional). + num_alerts: Max number of Anomaly entities (optional). + + Outputs: + JSON array of encoded protobuf messages, which encode all of + the datastore entities relating to one test (including Master, Bot, + TestMetadata, Row, Anomaly and Sheriff entities). + """ + anomalies, _, _ = anomaly.Anomaly.QueryAsync( + subscriptions=[sheriff_name], limit=num_anomalies).get_result() + test_keys = [a.GetTestMetadataKey() for a in anomalies] + + # List of datastore entities that will be dumped. + entities = [] + + entities.extend(_GetTestAncestors(test_keys)) + + # Get the Row entities. + entities.extend(_FetchRowsAsync(test_keys, num_points)) + + # Add the Anomaly and Sheriff entities. + entities += anomalies + subscriptions = [s for a in anomalies for s in a.subscriptions] + entities += subscriptions + + # Convert the entities to protobuf message strings and output as JSON. 
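  # For reference, consumers reverse these steps to turn the payload back into
  # entities, as the updated tests below do; a minimal sketch:
  #
  #   entities = [dump_graph_json.BinaryProtobufToEntity(s)
  #               for s in json.loads(protobuf_json)]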
+ protobuf_strings = list(map(EntityToBinaryProtobuf, entities)) + return json.dumps(protobuf_strings) + + +def _GetTestAncestors(test_keys): + """Gets the TestMetadata, Bot, and Master entities preceding in path.""" + entities = [] + added_parents = set() + for test_key in test_keys: + if test_key.kind() != 'TestMetadata': + continue + parts = utils.TestPath(test_key).split('/') + for index, _, in enumerate(parts): + test_path = '/'.join(parts[:index + 1]) + if test_path in added_parents: + continue + added_parents.add(test_path) + if index == 0: + entities.append(ndb.Key('Master', parts[0]).get()) + elif index == 1: + entities.append(ndb.Key('Master', parts[0], 'Bot', parts[1]).get()) + else: + entities.append(ndb.Key('TestMetadata', test_path).get()) + return [e for e in entities if e is not None] + + +def _FetchRowsAsync(test_keys, num_points): + """Fetches recent Row asynchronously across all 'test_keys'.""" + rows = [] + futures = [] + for test_key in test_keys: q = graph_data.Row.query() q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) - if end_rev: - q = q.filter(graph_data.Row.revision <= int(end_rev)) - q = q.order(-graph_data.Row.revision) - entities += q.fetch(limit=num_points) - - # Get the Anomaly and Sheriff entities. - alerts, _, _ = anomaly.Anomaly.QueryAsync(test=test_key).get_result() - subscriptions = [s for a in alerts for s in a.subscriptions] - entities += alerts - entities += subscriptions - - # Convert the entities to protobuf message strings and output as JSON. - protobuf_strings = list(map(EntityToBinaryProtobuf, entities)) - self.response.out.write(json.dumps(protobuf_strings)) - - def _DumpAnomalyDataForSheriff(self): - """Dumps Anomaly data for all sheriffs. - - Request parameters: - sheriff: Sheriff name. - num_points: Max number of Row entities (optional). - num_alerts: Max number of Anomaly entities (optional). - - Outputs: - JSON array of encoded protobuf messages, which encode all of - the datastore entities relating to one test (including Master, Bot, - TestMetadata, Row, Anomaly and Sheriff entities). - """ - sheriff_name = self.request.get('sheriff') - num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS)) - num_anomalies = int(self.request.get('num_alerts', _DEFAULT_MAX_ANOMALIES)) - - anomalies, _, _ = anomaly.Anomaly.QueryAsync( - subscriptions=[sheriff_name], limit=num_anomalies).get_result() - test_keys = [a.GetTestMetadataKey() for a in anomalies] - - # List of datastore entities that will be dumped. - entities = [] - - entities.extend(self._GetTestAncestors(test_keys)) - - # Get the Row entities. - entities.extend(self._FetchRowsAsync(test_keys, num_points)) - - # Add the Anomaly and Sheriff entities. - entities += anomalies - subscriptions = [s for a in anomalies for s in a.subscriptions] - entities += subscriptions - - # Convert the entities to protobuf message strings and output as JSON. 
- protobuf_strings = list(map(EntityToBinaryProtobuf, entities)) - self.response.out.write(json.dumps(protobuf_strings)) - - def _GetTestAncestors(self, test_keys): - """Gets the TestMetadata, Bot, and Master entities preceding in path.""" - entities = [] - added_parents = set() - for test_key in test_keys: - if test_key.kind() != 'TestMetadata': - continue - parts = utils.TestPath(test_key).split('/') - for index, _, in enumerate(parts): - test_path = '/'.join(parts[:index + 1]) - if test_path in added_parents: - continue - added_parents.add(test_path) - if index == 0: - entities.append(ndb.Key('Master', parts[0]).get()) - elif index == 1: - entities.append(ndb.Key('Master', parts[0], 'Bot', parts[1]).get()) - else: - entities.append(ndb.Key('TestMetadata', test_path).get()) - return [e for e in entities if e is not None] - - def _FetchRowsAsync(self, test_keys, num_points): - """Fetches recent Row asynchronously across all 'test_keys'.""" - rows = [] - futures = [] - for test_key in test_keys: - q = graph_data.Row.query() - q = q.filter( - graph_data.Row.parent_test == utils.OldStyleTestKey(test_key)) - q = q.order(-graph_data.Row.revision) - futures.append(q.fetch_async(limit=num_points)) - ndb.Future.wait_all(futures) - for future in futures: - rows.extend(future.get_result()) - return rows + q = q.order(-graph_data.Row.revision) # pylint: disable=invalid-unary-operand-type + futures.append(q.fetch_async(limit=num_points)) + ndb.Future.wait_all(futures) + for future in futures: + rows.extend(future.get_result()) + return rows def EntityToBinaryProtobuf(entity): diff --git a/dashboard/dashboard/dump_graph_json_test.py b/dashboard/dashboard/dump_graph_json_test.py index 5665d564ad8..94884958d02 100644 --- a/dashboard/dashboard/dump_graph_json_test.py +++ b/dashboard/dashboard/dump_graph_json_test.py @@ -6,10 +6,10 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json import unittest - -import webapp2 +import six import webtest from google.appengine.ext import ndb @@ -21,14 +21,21 @@ from dashboard.models import graph_data from dashboard.models.subscription import Subscription +flask_app = Flask(__name__) + + +@flask_app.route('/dump_graph_json', methods=['GET']) +def DumpGraphJsonHandler(): + return dump_graph_json.DumpGraphJsonHandlerGet() + +@unittest.skipIf(six.PY3, + 'Testing endpoint for dev_appserver only in Python 2.') class DumpGraphJsonTest(testing_common.TestCase): def setUp(self): - super(DumpGraphJsonTest, self).setUp() - app = webapp2.WSGIApplication([('/dump_graph_json', - dump_graph_json.DumpGraphJsonHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) def testGet_DumpJson_Basic(self): # Insert a test with no rows or alerts. 
@@ -71,8 +78,8 @@ def testGet_DumpJson_WithRows(self): out_rows = _EntitiesOfKind(entities, 'Row') expected_num_rows = dump_graph_json._DEFAULT_MAX_POINTS self.assertEqual(expected_num_rows, len(out_rows)) - expected_rev_range = range(highest_rev, highest_rev + 1 - expected_num_rows, - -1) + expected_rev_range = list( + range(highest_rev, highest_rev + 1 - expected_num_rows, -1)) for expected_rev, row in zip(expected_rev_range, out_rows): self.assertEqual(expected_rev, row.revision) self.assertEqual(expected_rev * 2, row.value) @@ -100,7 +107,7 @@ def testGet_DumpJson_WithRows(self): map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings)) out_rows = _EntitiesOfKind(entities, 'Row') rev_nums = [row.revision for row in out_rows] - expected_rev_range = range(highest_rev, highest_rev - 4, -1) + expected_rev_range = list(range(highest_rev, highest_rev - 4, -1)) self.assertEqual(expected_rev_range, rev_nums) def testDumpJsonWithAlertData(self): diff --git a/dashboard/dashboard/edit_anomalies.py b/dashboard/dashboard/edit_anomalies.py index a25f8ade828..9cf8a3e3172 100644 --- a/dashboard/dashboard/edit_anomalies.py +++ b/dashboard/dashboard/edit_anomalies.py @@ -15,13 +15,12 @@ from dashboard.common import utils from dashboard.common import xsrf +from flask import request, make_response -class EditAnomaliesHandler(request_handler.RequestHandler): - """Handles editing the bug IDs and revision range of Alerts.""" - @xsrf.TokenRequired - def post(self): - """Allows adding or resetting bug IDs and invalid statuses to Alerts. +@xsrf.TokenRequired +def EditAnomaliesPost(): + """Allows adding or resetting bug IDs and invalid statuses to Alerts. Additionally, this endpoint is also responsible for changing the start and end revisions of Anomaly entities. @@ -40,64 +39,67 @@ def post(self): "error" should be in the result. If successful, the response is still expected to be JSON. """ - if not utils.IsValidSheriffUser(): - user = users.get_current_user() - self.ReportError('User "%s" not authorized.' % user, status=403) - return - - # Get the list of alerts to modify. - urlsafe_keys = self.request.get('keys') - if not urlsafe_keys: - self.response.out.write( - json.dumps({'error': 'No alerts specified to add bugs to.'})) - return - keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] - alert_entities = ndb.get_multi(keys) - - # Get the parameters which specify the changes to make. - bug_id = self.request.get('bug_id') - new_start_revision = self.request.get('new_start_revision') - new_end_revision = self.request.get('new_end_revision') - result = None - if bug_id: - result = self.ChangeBugId(alert_entities, bug_id) - elif new_start_revision and new_end_revision: - result = self.NudgeAnomalies(alert_entities, new_start_revision, - new_end_revision) - else: - result = {'error': 'No bug ID or new revision specified.'} - self.response.out.write(json.dumps(result)) - - def ChangeBugId(self, alert_entities, bug_id): - """Changes or resets the bug ID of all given alerts.""" - # Change the bug ID if a new bug ID is specified and valid. - if bug_id == 'REMOVE': - bug_id = None - else: - try: - bug_id = int(bug_id) - except ValueError: - return {'error': 'Invalid bug ID %s' % str(bug_id)} - - for a in alert_entities: - a.bug_id = bug_id - - ndb.put_multi(alert_entities) - - return {'bug_id': bug_id} - - def NudgeAnomalies(self, anomaly_entities, start, end): - # Change the revision range if a new revision range is specified and valid. 
+ if not utils.IsValidSheriffUser(): + user = users.get_current_user() + return request_handler.RequestHandlerReportError( + 'User "%s" not authorized.' % user, status=403) + + + # Get the list of alerts to modify. + urlsafe_keys = request.values.get('keys') + if not urlsafe_keys: + return make_response( + json.dumps({'error': 'No alerts specified to add bugs to.'})) + + keys = [ndb.Key(urlsafe=k) for k in urlsafe_keys.split(',')] + alert_entities = ndb.get_multi(keys) + + # Get the parameters which specify the changes to make. + bug_id = request.values.get('bug_id') + new_start_revision = request.values.get('new_start_revision') + new_end_revision = request.values.get('new_end_revision') + result = None + if bug_id: + result = ChangeBugId(alert_entities, bug_id) + elif new_start_revision and new_end_revision: + result = NudgeAnomalies(alert_entities, new_start_revision, + new_end_revision) + else: + result = {'error': 'No bug ID or new revision specified.'} + return make_response(json.dumps(result)) + + +def ChangeBugId(alert_entities, bug_id): + """Changes or resets the bug ID of all given alerts.""" + # Change the bug ID if a new bug ID is specified and valid. + if bug_id == 'REMOVE': + bug_id = None + else: try: - start = int(start) - end = int(end) + bug_id = int(bug_id) except ValueError: - return {'error': 'Invalid revisions %s, %s' % (start, end)} + return {'error': 'Invalid bug ID %s' % str(bug_id)} - for a in anomaly_entities: - a.start_revision = start - a.end_revision = end + for a in alert_entities: + a.bug_id = bug_id - ndb.put_multi(anomaly_entities) + ndb.put_multi(alert_entities) - return {'success': 'Alerts nudged.'} + return {'bug_id': bug_id} + + +def NudgeAnomalies(anomaly_entities, start, end): + # Change the revision range if a new revision range is specified and valid. 
+ try: + start = int(start) + end = int(end) + except ValueError: + return {'error': 'Invalid revisions %s, %s' % (start, end)} + + for a in anomaly_entities: + a.start_revision = start + a.end_revision = end + + ndb.put_multi(anomaly_entities) + + return {'success': 'Alerts nudged.'} diff --git a/dashboard/dashboard/edit_anomalies_test.py b/dashboard/dashboard/edit_anomalies_test.py index 578a0273eef..0c8ba1e4fd4 100644 --- a/dashboard/dashboard/edit_anomalies_test.py +++ b/dashboard/dashboard/edit_anomalies_test.py @@ -6,11 +6,10 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import json -import unittest - import mock -import webapp2 +import unittest import webtest from google.appengine.api import users @@ -21,18 +20,23 @@ from dashboard.common import xsrf from dashboard.models import anomaly +flask_app = Flask(__name__) + + +@flask_app.route('/edit_anomalies', methods=['POST']) +def EditAnomaliesPost(): + return edit_anomalies.EditAnomaliesPost() + class EditAnomaliesTest(testing_common.TestCase): def setUp(self): - super(EditAnomaliesTest, self).setUp() - app = webapp2.WSGIApplication([('/edit_anomalies', - edit_anomalies.EditAnomaliesHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetSheriffDomains(['chromium.org']) def tearDown(self): - super(EditAnomaliesTest, self).tearDown() + super().tearDown() self.UnsetCurrentUser() def _AddAnomaliesToDataStore(self): @@ -63,8 +67,12 @@ def testPost_NoXSRFToken_Returns403Error(self): anomaly_keys = self._AddAnomaliesToDataStore() self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'bug_id': 31337, + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'bug_id': + 31337, }, status=403) self.assertIsNone(anomaly_keys[0].get().bug_id) @@ -75,9 +83,14 @@ def testPost_LoggedIntoInvalidDomain_DoesNotModifyAnomaly(self): self.SetCurrentUser('foo@bar.com') self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'bug_id': 31337, - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'bug_id': + 31337, + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }, status=403) self.assertIsNone(anomaly_keys[0].get().bug_id) @@ -87,9 +100,14 @@ def testPost_LoggedIntoValidSheriffAccount_ChangesBugID(self): self.SetCurrentUser('sullivan@chromium.org') self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'bug_id': 31337, - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'bug_id': + 31337, + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertEqual(31337, anomaly_keys[0].get().bug_id) @@ -101,9 +119,14 @@ def testPost_RemoveBug(self): a.put() self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'bug_id': 'REMOVE', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'bug_id': + 'REMOVE', + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertIsNone(anomaly_keys[0].get().bug_id) @@ -115,9 +138,14 @@ def testPost_ChangeBugIDToInvalidID_ReturnsError(self): a.put() response = 
self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'bug_id': 'a', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'bug_id': + 'a', + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertEqual({'error': 'Invalid bug ID a'}, json.loads(response.body)) self.assertEqual(12345, anomaly_keys[0].get().bug_id) @@ -139,10 +167,16 @@ def testPost_ChangeRevisions(self): self.SetCurrentUser('sullivan@chromium.org') self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'new_start_revision': '123450', - 'new_end_revision': '123455', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'new_start_revision': + '123450', + 'new_end_revision': + '123455', + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertEqual(123450, anomaly_keys[0].get().start_revision) self.assertEqual(123455, anomaly_keys[0].get().end_revision) @@ -154,10 +188,16 @@ def testPost_NudgeWithInvalidRevisions_ReturnsError(self): end = anomaly_keys[0].get().end_revision response = self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'new_start_revision': 'a', - 'new_end_revision': 'b', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'new_start_revision': + 'a', + 'new_end_revision': + 'b', + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertEqual(start, anomaly_keys[0].get().start_revision) self.assertEqual(end, anomaly_keys[0].get().end_revision) @@ -169,9 +209,14 @@ def testPost_IncompleteParametersGiven_ReturnsError(self): self.SetCurrentUser('sullivan@chromium.org') response = self.testapp.post( '/edit_anomalies', { - 'keys': json.dumps([anomaly_keys[0].urlsafe()]), - 'new_start_revision': '123', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), + 'keys': + json.dumps( + utils.ConvertBytesBeforeJsonDumps( + [anomaly_keys[0].urlsafe()])), + 'new_start_revision': + '123', + 'xsrf_token': + xsrf.GenerateToken(users.get_current_user()), }) self.assertEqual({'error': 'No bug ID or new revision specified.'}, json.loads(response.body)) diff --git a/dashboard/dashboard/edit_anomaly_configs.py b/dashboard/dashboard/edit_anomaly_configs.py deleted file mode 100644 index e6a598f54b2..00000000000 --- a/dashboard/dashboard/edit_anomaly_configs.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -"""Provides the web interface for editing anomaly threshold configurations.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -from dashboard import edit_config_handler -from dashboard.common import request_handler -from dashboard.models import anomaly_config - - -class EditAnomalyConfigsHandler(edit_config_handler.EditConfigHandler): - """Handles editing the info about anomaly threshold configurations. - - The post method is inherited from EditConfigHandler. 
It takes the request - parameters documented there, as well as the following parameter, which - is a property of AnomalyConfig: - config: A JSON dictionary mapping config parameters to values. - """ - - def __init__(self, request, response): - super(EditAnomalyConfigsHandler, - self).__init__(request, response, anomaly_config.AnomalyConfig) - - def get(self): - """Renders the UI with the form.""" - - # Note, this is similar to edit_sheriffs, and there may be some common - # logic that oculd be extracted to EditConfigHandler. - def ConfigData(config): - return { - 'config': json.dumps(config.config, indent=2, sort_keys=True), - 'patterns': '\n'.join(sorted(config.patterns)), - } - - anomaly_configs = { - config.key.string_id(): ConfigData(config) - for config in anomaly_config.AnomalyConfig.query() - } - - self.RenderHtml( - 'edit_anomaly_configs.html', { - 'anomaly_config_json': json.dumps(anomaly_configs), - 'anomaly_config_names': sorted(anomaly_configs.keys()), - }) - - def _UpdateFromRequestParameters(self, anomaly_config_entity): - """Updates the given AnomalyConfig based on query parameters.""" - # This overrides the method in the superclass. - anomaly_config_entity.config = self._GetAndValidateConfigContents() - - def _GetAndValidateConfigContents(self): - """Returns a config dict if one could be gotten, or None otherwise.""" - config = self.request.get('config') - if not config: - raise request_handler.InvalidInputError('No config contents given.') - try: - config_dict = json.loads(config) - except (ValueError, TypeError) as json_parse_error: - raise request_handler.InvalidInputError(str(json_parse_error)) - if not isinstance(config_dict, dict): - raise request_handler.InvalidInputError('Config was not a dict.') - return config_dict diff --git a/dashboard/dashboard/edit_anomaly_configs_test.py b/dashboard/dashboard/edit_anomaly_configs_test.py deleted file mode 100644 index 16771da2d2d..00000000000 --- a/dashboard/dashboard/edit_anomaly_configs_test.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import unittest - -import webapp2 -import webtest - -from google.appengine.api import users - -from dashboard import edit_anomaly_configs -from dashboard import edit_config_handler -from dashboard import list_tests -from dashboard import put_entities_task -from dashboard.common import testing_common -from dashboard.common import xsrf -from dashboard.models import anomaly_config -from dashboard.models import graph_data - - -class EditAnomalyConfigsTest(testing_common.TestCase): - - # This test case tests post requests to /edit_anomaly_configs. - # Each post request is either a request to add an entity or to edit one. 
- - def setUp(self): - super(EditAnomalyConfigsTest, self).setUp() - app = webapp2.WSGIApplication([ - ('/edit_anomaly_configs', - edit_anomaly_configs.EditAnomalyConfigsHandler), - ('/put_entities_task', put_entities_task.PutEntitiesTaskHandler), - ]) - self.testapp = webtest.TestApp(app) - - def tearDown(self): - super(EditAnomalyConfigsTest, self).tearDown() - self.UnsetCurrentUser() - - def testAdd(self): - """Tests changing the config property of an existing AnomalyConfig.""" - self.SetCurrentUser('qyearsley@chromium.org', is_admin=True) - - self.testapp.post( - '/edit_anomaly_configs', { - 'add-edit': 'add', - 'add-name': 'New Config', - 'config': '{"foo": 10}', - 'patterns': 'M/b/ts/*\nM/b/ts/*/*\n', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - - anomaly_configs = anomaly_config.AnomalyConfig.query().fetch() - self.assertEqual(len(anomaly_configs), 1) - config = anomaly_configs[0] - self.assertEqual('New Config', config.key.string_id()) - self.assertEqual({'foo': 10}, config.config) - self.assertEqual(['M/b/ts/*', 'M/b/ts/*/*'], config.patterns) - - def testEdit(self): - """Tests changing the config property of an existing AnomalyConfig.""" - self.SetCurrentUser('sullivan@chromium.org', is_admin=True) - anomaly_config.AnomalyConfig( - id='Existing Config', config={ - 'old': 11 - }, patterns=['MyMaster/*/*/*']).put() - - self.testapp.post( - '/edit_anomaly_configs', { - 'add-edit': 'edit', - 'edit-name': 'Existing Config', - 'config': '{"new": 10}', - 'patterns': 'MyMaster/*/*/*', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - - anomaly_configs = anomaly_config.AnomalyConfig.query().fetch() - self.assertEqual(len(anomaly_configs), 1) - self.assertEqual('Existing Config', anomaly_configs[0].key.string_id()) - self.assertEqual({'new': 10}, anomaly_configs[0].config) - self.assertEqual(['MyMaster/*/*/*'], anomaly_configs[0].patterns) - - def testEdit_AddPattern(self): - """Tests changing the patterns list of an existing AnomalyConfig.""" - self.SetCurrentUser('sullivan@chromium.org', is_admin=True) - master = graph_data.Master(id='TheMaster').put() - graph_data.Bot(id='TheBot', parent=master).put() - suite1 = graph_data.TestMetadata(id='TheMaster/TheBot/Suite1') - suite1.UpdateSheriff() - suite1 = suite1.put() - - suite2 = graph_data.TestMetadata(id='TheMaster/TheBot/Suite2') - suite2.UpdateSheriff() - suite2 = suite2.put() - - test_aaa = graph_data.TestMetadata( - id='TheMaster/TheBot/Suite1/aaa', has_rows=True) - test_aaa.UpdateSheriff() - test_aaa = test_aaa.put() - - test_bbb = graph_data.TestMetadata( - id='TheMaster/TheBot/Suite1/bbb', has_rows=True) - test_bbb.UpdateSheriff() - test_bbb = test_bbb.put() - - test_ccc = graph_data.TestMetadata( - id='TheMaster/TheBot/Suite1/ccc', has_rows=True) - test_ccc.UpdateSheriff() - test_ccc = test_ccc.put() - - test_ddd = graph_data.TestMetadata( - id='TheMaster/TheBot/Suite2/ddd', has_rows=True) - test_ddd.UpdateSheriff() - test_ddd = test_ddd.put() - - anomaly_config.AnomalyConfig(id='1-Suite1-specific', config={'a': 10}).put() - anomaly_config.AnomalyConfig(id='2-Suite1-general', config={'b': 20}).put() - - self.testapp.post( - '/edit_anomaly_configs', { - 'add-edit': 'edit', - 'edit-name': '1-Suite1-specific', - 'config': '{"a": 10}', - 'patterns': '*/*/Suite1/aaa', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - self.ExecuteTaskQueueTasks('/put_entities_task', - edit_config_handler._TASK_QUEUE_NAME) - self.testapp.post( - '/edit_anomaly_configs', { - 'add-edit': 'edit', 
- 'edit-name': '2-Suite1-general', - 'config': '{"b": 20}', - 'patterns': '*/*/Suite1/*', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - self.ExecuteDeferredTasks('default') - self.ExecuteTaskQueueTasks('/put_entities_task', - edit_config_handler._TASK_QUEUE_NAME) - - # The lists of test patterns in the AnomalyConfig entities in the datastore - # should be set based on what was added in the two requests above. - self.assertEqual( - ['*/*/Suite1/*'], - anomaly_config.AnomalyConfig.get_by_id('2-Suite1-general').patterns) - self.assertEqual( - ['*/*/Suite1/aaa'], - anomaly_config.AnomalyConfig.get_by_id('1-Suite1-specific').patterns) - - # The 1-Suite1-specific config applies instead of the other config - # because its name comes first according to sort order. - self.assertEqual('1-Suite1-specific', - test_aaa.get().overridden_anomaly_config.string_id()) - # The 2-Suite1-specific config applies to the other tests under Suite1. - self.assertEqual('2-Suite1-general', - test_bbb.get().overridden_anomaly_config.string_id()) - self.assertEqual('2-Suite1-general', - test_ccc.get().overridden_anomaly_config.string_id()) - - # Note that Suite2/ddd has no config, and nor do the parent tests. - self.assertIsNone(test_ddd.get().overridden_anomaly_config) - self.assertIsNone(suite1.get().overridden_anomaly_config) - self.assertIsNone(suite2.get().overridden_anomaly_config) - - def testEdit_RemovePattern(self): - """Tests removing a pattern from an AnomalyConfig.""" - self.SetCurrentUser('sullivan@chromium.org', is_admin=True) - anomaly_config_key = anomaly_config.AnomalyConfig( - id='Test Config', config={ - 'a': 10 - }, patterns=['*/*/one', '*/*/two']).put() - master = graph_data.Master(id='TheMaster').put() - graph_data.Bot(id='TheBot', parent=master).put() - test_one = graph_data.TestMetadata( - id='TheMaster/TheBot/one', - overridden_anomaly_config=anomaly_config_key, - has_rows=True) - test_one.UpdateSheriff() - test_one = test_one.put() - - test_two = graph_data.TestMetadata( - id='TheMaster/TheBot/two', - overridden_anomaly_config=anomaly_config_key, - has_rows=True) - test_two.UpdateSheriff() - test_two = test_two.put() - - # Verify the state of the data before making the request. - self.assertEqual(['*/*/one', '*/*/two'], anomaly_config_key.get().patterns) - self.assertEqual(['TheMaster/TheBot/one'], - list_tests.GetTestsMatchingPattern('*/*/one')) - - self.testapp.post( - '/edit_anomaly_configs', { - 'add-edit': 'edit', - 'edit-name': 'Test Config', - 'config': '{"a": 10}', - 'patterns': ['*/*/two'], - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - self.ExecuteDeferredTasks('default') - self.ExecuteTaskQueueTasks('/put_entities_task', - edit_config_handler._TASK_QUEUE_NAME) - - self.assertEqual(['*/*/two'], anomaly_config_key.get().patterns) - self.assertIsNone(test_one.get().overridden_anomaly_config) - self.assertEqual('Test Config', - test_two.get().overridden_anomaly_config.string_id()) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/edit_bug_labels.py b/dashboard/dashboard/edit_bug_labels.py deleted file mode 100644 index 283de335b92..00000000000 --- a/dashboard/dashboard/edit_bug_labels.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
-"""Provides the web interface for adding and removing bug labels.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import json - -from dashboard.common import request_handler -from dashboard.common import xsrf -from dashboard.models import bug_label_patterns - - -class EditBugLabelsHandler(request_handler.RequestHandler): - """Handles editing the info about perf sheriff rotations.""" - - def get(self): - """Renders the UI with all of the forms.""" - patterns_dict = bug_label_patterns.GetBugLabelPatterns() - self.RenderHtml( - 'edit_bug_labels.html', { - 'bug_labels': - sorted(patterns_dict), - 'bug_labels_json': - json.dumps(patterns_dict, indent=2, sort_keys=True) - }) - - @xsrf.TokenRequired - def post(self): - """Updates the sheriff configurations. - - Each form on the edit sheriffs page has a hidden field called action, which - tells us which form was submitted. The other particular parameters that are - expected depend on which form was submitted. - """ - action = self.request.get('action') - if action == 'add_buglabel_pattern': - self._AddBuglabelPattern() - if action == 'remove_buglabel_pattern': - self._RemoveBuglabelPattern() - - def _AddBuglabelPattern(self): - """Adds a bug label to be added to a group of tests. - - Request parameters: - buglabel_to_add: The bug label, which is a BugLabelPattern entity name. - pattern: A test path pattern. - """ - label = self.request.get('buglabel_to_add') - pattern = self.request.get('pattern') - bug_label_patterns.AddBugLabelPattern(label, pattern) - self.RenderHtml( - 'result.html', { - 'headline': 'Added label %s' % label, - 'results': [{ - 'name': 'Pattern', - 'value': pattern - }] - }) - - def _RemoveBuglabelPattern(self): - """Removes a BugLabelPattern so that the label no longer applies. - - Request parameters: - buglabel_to_remove: The bug label, which is the name of a - BugLabelPattern entity. - """ - label = self.request.get('buglabel_to_remove') - bug_label_patterns.RemoveBugLabel(label) - self.RenderHtml('result.html', {'headline': 'Deleted label %s' % label}) diff --git a/dashboard/dashboard/edit_bug_labels_test.py b/dashboard/dashboard/edit_bug_labels_test.py deleted file mode 100644 index 08d2a773016..00000000000 --- a/dashboard/dashboard/edit_bug_labels_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import unittest - -import webapp2 -import webtest - -from google.appengine.api import users - -from dashboard import edit_bug_labels -from dashboard.common import testing_common -from dashboard.common import xsrf -from dashboard.models import bug_label_patterns - - -class EditBugLabelsTest(testing_common.TestCase): - - def setUp(self): - super(EditBugLabelsTest, self).setUp() - app = webapp2.WSGIApplication([('/edit_bug_labels', - edit_bug_labels.EditBugLabelsHandler)]) - self.testapp = webtest.TestApp(app) - # Set the current user to be an admin. 
- self.SetCurrentUser('x@google.com', is_admin=True) - - def tearDown(self): - super(EditBugLabelsTest, self).tearDown() - self.UnsetCurrentUser() - - def testBugLabelPattern_AddAndRemove(self): - self.testapp.post( - '/edit_bug_labels', { - 'action': 'add_buglabel_pattern', - 'buglabel_to_add': 'Performance-1', - 'pattern': '*/*/Suite1/*', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - - # The list of patterns should now contain the pattern that was added. - self.assertEqual(['*/*/Suite1/*'], - bug_label_patterns.GetBugLabelPatterns()['Performance-1']) - - # Add another pattern for the same bug label. - self.testapp.post( - '/edit_bug_labels', { - 'action': 'add_buglabel_pattern', - 'buglabel_to_add': 'Performance-1', - 'pattern': '*/*/Suite2/*', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - - # The list of patterns should now contain both patterns. - self.assertEqual(['*/*/Suite1/*', '*/*/Suite2/*'], - bug_label_patterns.GetBugLabelPatterns()['Performance-1']) - - # Remove the BugLabelPattern entity. - self.testapp.post( - '/edit_bug_labels', { - 'action': 'remove_buglabel_pattern', - 'buglabel_to_remove': 'Performance-1', - 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), - }) - - # It should now be absent from the datastore. - self.assertNotIn('Performance-1', bug_label_patterns.GetBugLabelPatterns()) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/edit_config_handler.py b/dashboard/dashboard/edit_config_handler.py deleted file mode 100644 index 18b93dad5ad..00000000000 --- a/dashboard/dashboard/edit_config_handler.py +++ /dev/null @@ -1,328 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -"""A common base class for pages that are used to edit configs.""" -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import -from future_builtins import map # pylint: disable=redefined-builtin - -import functools -import itertools -import json -import operator - -from google.appengine.api import app_identity -from google.appengine.api import mail -from google.appengine.api import taskqueue -from google.appengine.api import users -from google.appengine.ext import deferred - -from dashboard import list_tests -from dashboard.common import request_handler -from dashboard.common import utils -from dashboard.common import xsrf - -# Max number of entities to put in one request to /put_entities_task. -_MAX_TESTS_TO_PUT_AT_ONCE = 25 - -# The queue to use to re-put tests. Should be present in queue.yaml. -_TASK_QUEUE_NAME = 'edit-sheriffs-queue' - -# Minimum time before starting tasks, in seconds. It appears that the tasks -# may be executed before the sheriff is saved, so this is a workaround for that. -# See http://crbug.com/621499 -_TASK_QUEUE_COUNTDOWN = 60 - -_NUM_PATTERNS_PER_TASK = 10 - -_NOTIFICATION_EMAIL_BODY = """ -The configuration of %(hostname)s was changed by %(user)s. - -Key: %(key)s - -New test path patterns: -%(new_test_path_patterns)s - -Old test path patterns -%(old_test_path_patterns)s -""" - -# The mailing list to which config change notifications are sent, -# so that the team can keep an audit record of these changes. -# The "gasper-alerts" address is a historic legacy and not important. 
-_NOTIFICATION_ADDRESS = 'chrome-performance-monitoring-alerts@google.com' -_SENDER_ADDRESS = 'gasper-alerts@google.com' - - -class EditConfigHandler(request_handler.RequestHandler): - """Base class for handlers that are used to add or edit entities. - - Specifically, this is a common base class for EditSheriffsHandler - and EditAnomalyConfigsHandler. Both of these kinds of entities - represent a configuration that can apply to a set of tests, where - the set of tests is specified with a list of test path patterns. - """ - - # The webapp2 docs say that custom __init__ methods should call initialize() - # at the beginning of the method (rather than calling super __init__). See: - # https://webapp-improved.appspot.com/guide/handlers.html#overriding-init - # pylint: disable=super-init-not-called - def __init__(self, request, response, model_class): - """Constructs a handler object for editing entities of the given class. - - Args: - request: Request object (implicitly passed in by webapp2). - response: Response object (implicitly passed in by webapp2). - model_class: A subclass of ndb.Model. - """ - self.initialize(request, response) - self._model_class = model_class - - @xsrf.TokenRequired - def post(self): - """Updates the user-selected anomaly threshold configuration. - - Request parameters: - add-edit: Either 'add' if adding a new config, or 'edit'. - add-name: A new anomaly config name, if adding one. - edit-name: An existing anomaly config name, if editing one. - patterns: Newline-separated list of test path patterns to monitor. - - Depending on the specific sub-class, this will also take other - parameters for specific properties of the entity being edited. - """ - try: - edit_type = self.request.get('add-edit') - if edit_type == 'add': - self._AddEntity() - elif edit_type == 'edit': - self._EditEntity() - else: - raise request_handler.InvalidInputError('Invalid value for add-edit.') - except request_handler.InvalidInputError as error: - message = str(error) + ' Model class: ' + self._model_class.__name__ - self.RenderHtml('result.html', {'errors': [message]}) - - def _AddEntity(self): - """Adds adds a new entity according to the request parameters.""" - name = self.request.get('add-name') - if not name: - raise request_handler.InvalidInputError('No name given when adding new ') - if self._model_class.get_by_id(name): - raise request_handler.InvalidInputError( - 'Entity "%s" already exists, cannot add.' % name) - entity = self._model_class(id=name) - self._UpdateAndReportResults(entity) - - def _EditEntity(self): - """Edits an existing entity according to the request parameters.""" - name = self.request.get('edit-name') - if not name: - raise request_handler.InvalidInputError('No name given.') - entity = self._model_class.get_by_id(name) - if not entity: - raise request_handler.InvalidInputError( - 'Entity "%s" does not exist, cannot edit.' 
% name) - self._UpdateAndReportResults(entity) - - def _UpdateAndReportResults(self, entity): - """Updates the entity and reports the results of this updating.""" - new_patterns = _SplitPatternLines(self.request.get('patterns')) - old_patterns = entity.patterns - entity.patterns = new_patterns - self._UpdateFromRequestParameters(entity) - entity.put() - - self._RenderResults(entity, new_patterns, old_patterns) - self._QueueChangeTestPatternsAndEmail(entity, new_patterns, old_patterns) - - def _QueueChangeTestPatternsAndEmail(self, entity, new_patterns, - old_patterns): - deferred.defer(_QueueChangeTestPatternsTasks, old_patterns, new_patterns) - - user_email = users.get_current_user().email() - subject = 'Added or updated %s: %s by %s' % ( - self._model_class.__name__, entity.key.string_id(), user_email) - email_key = entity.key.string_id() - - email_body = _NOTIFICATION_EMAIL_BODY % { - 'key': - email_key, - 'new_test_path_patterns': - json.dumps( - list(new_patterns), - indent=2, - sort_keys=True, - separators=(',', ': ')), - 'old_test_path_patterns': - json.dumps( - list(old_patterns), - indent=2, - sort_keys=True, - separators=(',', ': ')), - 'hostname': - app_identity.get_default_version_hostname(), - 'user': - user_email, - } - mail.send_mail( - sender=_SENDER_ADDRESS, - to=_NOTIFICATION_ADDRESS, - subject=subject, - body=email_body) - - def _UpdateFromRequestParameters(self, entity): - """Updates the given entity based on query parameters. - - This method does not need to put() the entity. - - Args: - entity: The entity to update. - """ - raise NotImplementedError() - - def _RenderResults(self, entity, new_patterns, old_patterns): - """Outputs results using the results.html template. - - Args: - entity: The entity that was edited. - new_patterns: New test patterns that this config now applies to. - old_patterns: Old Test patterns that this config no longer applies to. - """ - - def ResultEntry(name, value): - """Returns an entry in the results lists to embed on result.html.""" - return {'name': name, 'value': value, 'class': 'results-pre'} - - self.RenderHtml( - 'result.html', { - 'headline': ('Added or updated %s "%s".' % - (self._model_class.__name__, entity.key.string_id())), - 'results': [ - ResultEntry('Entity', str(entity)), - ResultEntry('New Patterns', '\n'.join(new_patterns)), - ResultEntry('Old Patterns', '\n'.join(old_patterns)), - ] - }) - - -def _SplitPatternLines(patterns_string): - """Splits up the given newline-separated patterns and validates them.""" - test_path_patterns = sorted(p for p in patterns_string.splitlines() if p) - _ValidatePatterns(test_path_patterns) - return test_path_patterns - - -def _ValidatePatterns(test_path_patterns): - """Raises an exception if any test path patterns are invalid.""" - for pattern in test_path_patterns: - if not _IsValidTestPathPattern(pattern): - raise request_handler.InvalidInputError( - 'Invalid test path pattern: "%s"' % pattern) - - -def _IsValidTestPathPattern(test_path_pattern): - """Checks whether the given test path pattern string is OK.""" - if '[' in test_path_pattern or ']' in test_path_pattern: - return False - # Valid test paths will have a Master, bot, and test suite, and will - # generally have a chart name and trace name after that. - return len(test_path_pattern.split('/')) >= 3 - - -def _QueueChangeTestPatternsTasks(old_patterns, new_patterns): - """Updates tests that are different between old_patterns and new_patterns. - - The two arguments both represent sets of test paths (i.e. sets of data - series). 
Any tests that are different between these two sets need to be - updated. - - Some properties of TestMetadata entities are updated when they are put in the - |_pre_put_hook| method of TestMetadata, so any TestMetadata entity that might - need to be updated should be re-put. - - Args: - old_patterns: An iterable of test path pattern strings. - new_patterns: Another iterable of test path pattern strings. - - Returns: - A pair (added_test_paths, removed_test_paths), which are, respectively, - the test paths that are in the new set but not the old, and those that - are in the old set but not the new. - """ - added_patterns, removed_patterns = _ComputeDeltas(old_patterns, new_patterns) - patterns = list(added_patterns) + list(removed_patterns) - - def Chunks(seq, size): - for i in itertools.count(0, size): - if i < len(seq): - yield seq[i:i + size] - else: - break - - for pattern_sublist in Chunks(patterns, _NUM_PATTERNS_PER_TASK): - deferred.defer(_GetTestPathsAndAddTask, pattern_sublist) - - -def _GetTestPathsAndAddTask(patterns): - test_paths = _AllTestPathsMatchingPatterns(patterns) - - _AddTestsToPutToTaskQueue(test_paths) - - -def _ComputeDeltas(old_items, new_items): - """Finds the added and removed items in a new set compared to an old one. - - Args: - old_items: A collection of existing items. Could be a list or set. - new_items: Another collection of items. - - Returns: - A pair of sets (added, removed). - """ - old, new = set(old_items), set(new_items) - return new - old, old - new - - -def _RemoveOverlapping(added_items, removed_items): - """Returns two sets of items with the common items removed.""" - added, removed = set(added_items), set(removed_items) - return added - removed, removed - added - - -def _AllTestPathsMatchingPatterns(patterns_list): - """Returns a list of all test paths matching the given list of patterns.""" - - def GetResult(future): - return set(future.get_result()) - - return sorted( - functools.reduce( - operator.ior, - map(GetResult, - map(list_tests.GetTestsMatchingPatternAsync, patterns_list)), - set())) - - -def _AddTestsToPutToTaskQueue(test_paths): - """Adds tests that we want to re-put in the datastore to a queue. - - We need to re-put the tests so that TestMetadata._pre_put_hook is run, so that - the sheriff or alert threshold config of the TestMetadata is updated. - - Args: - test_paths: List of test paths of tests to be re-put. - """ - futures = [] - queue = taskqueue.Queue(_TASK_QUEUE_NAME) - for start_index in range(0, len(test_paths), _MAX_TESTS_TO_PUT_AT_ONCE): - group = test_paths[start_index:start_index + _MAX_TESTS_TO_PUT_AT_ONCE] - urlsafe_keys = [utils.TestKey(t).urlsafe() for t in group] - t = taskqueue.Task( - url='/put_entities_task', - params={'keys': ','.join(urlsafe_keys)}, - countdown=_TASK_QUEUE_COUNTDOWN) - futures.append(queue.add_async(t)) - for f in futures: - f.get_result() diff --git a/dashboard/dashboard/edit_config_handler_test.py b/dashboard/dashboard/edit_config_handler_test.py deleted file mode 100644 index 7dc65a9b893..00000000000 --- a/dashboard/dashboard/edit_config_handler_test.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -from __future__ import print_function -from __future__ import division -from __future__ import absolute_import - -import unittest - -import mock -import webapp2 -import webtest - -from google.appengine.ext import deferred - -from dashboard import edit_config_handler -from dashboard import put_entities_task -from dashboard.common import request_handler -from dashboard.common import testing_common -from dashboard.models import graph_data - - -class EditConfigHandlerTest(testing_common.TestCase): - - def setUp(self): - super(EditConfigHandlerTest, self).setUp() - app = webapp2.WSGIApplication([('/put_entities_task', - put_entities_task.PutEntitiesTaskHandler)]) - self.testapp = webtest.TestApp(app) - self.SetCurrentUser('foo@bar.com', is_admin=True) - - def _AddSampleTestData(self): - """Adds some sample data used in the tests below.""" - master = graph_data.Master(id='TheMaster').put() - graph_data.Bot(id='TheBot', parent=master).put() - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite1') - t.UpdateSheriff() - t.put() - - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite2') - t.UpdateSheriff() - t.put() - - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite1/aaa', has_rows=True) - t.UpdateSheriff() - t.put() - - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite1/bbb', has_rows=True) - t.UpdateSheriff() - t.put() - - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite2/ccc', has_rows=True) - t.UpdateSheriff() - t.put() - - t = graph_data.TestMetadata(id='TheMaster/TheBot/Suite2/ddd', has_rows=True) - t.UpdateSheriff() - t.put() - - def testSplitPatternLines_OnePattern(self): - # The SplitPatternLines function returns a list of patterns. - self.assertEqual([], edit_config_handler._SplitPatternLines('')) - self.assertEqual(['A/b/c'], edit_config_handler._SplitPatternLines('A/b/c')) - self.assertEqual(['A/b/c'], - edit_config_handler._SplitPatternLines('A/b/c\n\n')) - - def testSplitPatternLines_SortsPatterns(self): - # Re-ordering and extra newlines are ignored in patterns input. - self.assertEqual(['A/b/c/d', 'E/f/g/h'], - edit_config_handler._SplitPatternLines('A/b/c/d\nE/f/g/h')) - self.assertEqual(['A/b/c/d', 'E/f/g/h'], - edit_config_handler._SplitPatternLines('E/f/g/h\nA/b/c/d')) - self.assertEqual( - ['A/b/c/d', 'E/f/g/h'], - edit_config_handler._SplitPatternLines('A/b/c/d\n\nE/f/g/h\n')) - - def testSplitPatternLines_NoSlashes_RaisesError(self): - # A valid test path must contain a master, bot, and test part. - with self.assertRaises(request_handler.InvalidInputError): - edit_config_handler._SplitPatternLines('invalid') - - def testSplitPatternLines_HasBrackets_RaisesError(self): - # Strings with brackets in them cannot be valid test paths. 
- with self.assertRaises(request_handler.InvalidInputError): - edit_config_handler._SplitPatternLines('A/b/c/d/[e]') - - def testChangeTestPatterns_NoneValue_RaisesTypeError(self): - with self.assertRaises(TypeError): - edit_config_handler._QueueChangeTestPatternsTasks('a/b/c', None) - - @mock.patch.object(deferred, 'defer') - def testChangeTestPatterns_NoChange_ReturnsEmptySets(self, mock_defer): - edit_config_handler._QueueChangeTestPatternsTasks([], []) - self.assertFalse(mock_defer.called) - - edit_config_handler._QueueChangeTestPatternsTasks(['a/b/c'], ['a/b/c']) - self.assertFalse(mock_defer.called) - - @mock.patch.object(edit_config_handler, '_AddTestsToPutToTaskQueue') - def testChangeTestPatterns_OnlyAdd_ReturnsAddedAndEmptySet( - self, mock_add_tests): - self._AddSampleTestData() - - edit_config_handler._QueueChangeTestPatternsTasks( - ['*/*/*/bbb'], ['*/*/*/aaa', '*/*/*/bbb']) - - self.ExecuteDeferredTasks('default') - - mock_add_tests.assert_called_with(['TheMaster/TheBot/Suite1/aaa']) - - @mock.patch.object(edit_config_handler, '_AddTestsToPutToTaskQueue') - def testChangeTestPatterns_OnlyRemove_ReturnsEmptySetAndRemoved( - self, mock_add_tests): - self._AddSampleTestData() - - edit_config_handler._QueueChangeTestPatternsTasks( - ['*/*/*/aaa', '*/*/Suite1/bbb'], ['*/*/*/aaa']) - - self.ExecuteDeferredTasks('default') - - mock_add_tests.assert_called_with(['TheMaster/TheBot/Suite1/bbb']) - - @mock.patch.object(edit_config_handler, '_AddTestsToPutToTaskQueue') - def testChangeTestPatterns_RemoveAndAdd_ReturnsAddedAndRemoved( - self, mock_add_tests): - self._AddSampleTestData() - - edit_config_handler._QueueChangeTestPatternsTasks(['*/*/Suite2/*'], - ['*/*/*/aaa']) - - self.ExecuteDeferredTasks('default') - - mock_add_tests.assert_called_with([ - 'TheMaster/TheBot/Suite1/aaa', 'TheMaster/TheBot/Suite2/ccc', - 'TheMaster/TheBot/Suite2/ddd' - ]) - - @mock.patch.object(edit_config_handler, '_AddTestsToPutToTaskQueue') - def testChangeTestPatterns_CanTakeSetsAsArguments(self, mock_add_tests): - self._AddSampleTestData() - - edit_config_handler._QueueChangeTestPatternsTasks(set(), {'*/*/Suite1/aaa'}) - - self.ExecuteDeferredTasks('default') - - mock_add_tests.assert_called_with(['TheMaster/TheBot/Suite1/aaa']) - - def testComputeDeltas_Empty(self): - self.assertEqual((set(), set()), edit_config_handler._ComputeDeltas([], [])) - - def testComputeDeltas_OnlyAdded(self): - self.assertEqual(({'a'}, set()), - edit_config_handler._ComputeDeltas('bcd', 'abcd')) - - def testComputeDeltas_OnlyRemoved(self): - self.assertEqual((set(), {'a'}), - edit_config_handler._ComputeDeltas('abcd', 'bcd')) - - def testRemoveOverlapping_NoOverlap_ReturnsSameSet(self): - self.assertEqual(({1, 2, 3}, {4, 5, 6}), - edit_config_handler._RemoveOverlapping({1, 2, 3}, - {4, 5, 6})) - - def testRemoveOverlapping_SomeOverlap_ReturnsSetDifferences(self): - self.assertEqual(({1}, {3}), - edit_config_handler._RemoveOverlapping({1, 2}, {2, 3})) - - def testRemoveOverlapping_AllOverlap_ReturnsEmptySets(self): - self.assertEqual((set(), set()), - edit_config_handler._RemoveOverlapping({1, 2}, {1, 2})) - - -if __name__ == '__main__': - unittest.main() diff --git a/dashboard/dashboard/edit_site_config.py b/dashboard/dashboard/edit_site_config.py index 94de9876ef9..b84ae1d3b68 100644 --- a/dashboard/dashboard/edit_site_config.py +++ b/dashboard/dashboard/edit_site_config.py @@ -20,6 +20,7 @@ from dashboard.common import utils from dashboard.common import xsrf + _NOTIFICATION_EMAIL_BODY = """ The configuration of %(hostname)s 
was changed by %(user)s. @@ -38,85 +39,86 @@ # The mailing list to which config change notifications are sent, # so that the team can keep an audit record of these changes. # The "gasper-alerts" address is a historic legacy and not important. -_NOTIFICATION_ADDRESS = 'chrome-performance-monitoring-alerts@google.com' +_NOTIFICATION_ADDRESS = 'browser-perf-engprod@google.com' _SENDER_ADDRESS = 'gasper-alerts@google.com' +from flask import request, make_response +# Handles editing of site config values stored with stored_entity. -class EditSiteConfigHandler(request_handler.RequestHandler): - """Handles editing of site config values stored with stored_entity. - - FIXME: One confusing aspect of this page is: If a namespaced config is set, - the non-namespaced configs are probably irrelevant bu tthe field is still - shown. Similarly, if a non-namespaced config is set, the namespaced config - fields are likely not needed, but they're shown. - """ - - def get(self): - """Renders the UI with the form.""" - key = self.request.get('key') - if not key: - self.RenderHtml('edit_site_config.html', {}) - return - - value = stored_object.Get(key) - external_value = namespaced_stored_object.GetExternal(key) - internal_value = namespaced_stored_object.Get(key) - self.RenderHtml( - 'edit_site_config.html', { - 'key': key, - 'value': _FormatJson(value), - 'external_value': _FormatJson(external_value), - 'internal_value': _FormatJson(internal_value), - }) - - @xsrf.TokenRequired - def post(self): - """Accepts posted values, makes changes, and shows the form again.""" - key = self.request.get('key') - - if not utils.IsInternalUser(): - self.RenderHtml( - 'edit_site_config.html', - {'error': 'Only internal users can post to this end-point.'}) - return - - if not key: - self.RenderHtml('edit_site_config.html', {}) - return - - new_value_json = self.request.get('value').strip() - new_external_value_json = self.request.get('external_value').strip() - new_internal_value_json = self.request.get('internal_value').strip() - - template_params = { - 'key': key, - 'value': new_value_json, - 'external_value': new_external_value_json, - 'internal_value': new_internal_value_json, - } - - try: - new_value = json.loads(new_value_json or 'null') - new_external_value = json.loads(new_external_value_json or 'null') - new_internal_value = json.loads(new_internal_value_json or 'null') - except ValueError: - template_params['error'] = 'Invalid JSON in at least one field.' 
- self.RenderHtml('edit_site_config.html', template_params) - return - - old_value = stored_object.Get(key) - old_external_value = namespaced_stored_object.GetExternal(key) - old_internal_value = namespaced_stored_object.Get(key) - - stored_object.Set(key, new_value) - namespaced_stored_object.SetExternal(key, new_external_value) - namespaced_stored_object.Set(key, new_internal_value) - - _SendNotificationEmail(key, old_value, old_external_value, - old_internal_value, new_value, new_external_value, - new_internal_value) - self.RenderHtml('edit_site_config.html', template_params) +def EditSiteConfigHandlerGet(): + """Renders the UI with the form.""" + key = request.args.get('key') + if not key: + return request_handler.RequestHandlerRenderHtml('edit_site_config.html', {}) + + return_format = request.args.get('format') + + value = stored_object.Get(key) + external_value = namespaced_stored_object.GetExternal(key) + internal_value = namespaced_stored_object.Get(key) + body = { + 'key': key, + 'value': _FormatJson(value), + 'external_value': _FormatJson(external_value), + 'internal_value': _FormatJson(internal_value), + } + if return_format == 'json': + res = make_response(body, 200) + else: + res = request_handler.RequestHandlerRenderHtml('edit_site_config.html', + body) + + return res + + +@xsrf.TokenRequired +def EditSiteConfigHandlerPost(): + """Accepts posted values, makes changes, and shows the form again.""" + key = request.values.get('key') + + if not utils.IsInternalUser(): + res = request_handler.RequestHandlerRenderHtml( + 'edit_site_config.html', + {'error': 'Only internal users can post to this end-point.'}) + return res + + if not key: + return request_handler.RequestHandlerRenderHtml('edit_site_config.html', {}) + + new_value_json = request.values.get('value', '').strip() + new_external_value_json = request.values.get('external_value', '').strip() + new_internal_value_json = request.values.get('internal_value', '').strip() + + template_params = { + 'key': key, + 'value': new_value_json, + 'external_value': new_external_value_json, + 'internal_value': new_internal_value_json, + } + + try: + new_value = json.loads(new_value_json or 'null') + new_external_value = json.loads(new_external_value_json or 'null') + new_internal_value = json.loads(new_internal_value_json or 'null') + except ValueError: + template_params['error'] = 'Invalid JSON in at least one field.' 
+ return request_handler.RequestHandlerRenderHtml('edit_site_config.html', + template_params) + + old_value = stored_object.Get(key) + old_external_value = namespaced_stored_object.GetExternal(key) + old_internal_value = namespaced_stored_object.Get(key) + + stored_object.Set(key, new_value) + namespaced_stored_object.SetExternal(key, new_external_value) + namespaced_stored_object.Set(key, new_internal_value) + + _SendNotificationEmail(key, old_value, old_external_value, old_internal_value, + new_value, new_external_value, new_internal_value) + + return request_handler.RequestHandlerRenderHtml('edit_site_config.html', + template_params) def _SendNotificationEmail(key, old_value, old_external_value, diff --git a/dashboard/dashboard/edit_site_config_test.py b/dashboard/dashboard/edit_site_config_test.py index 66c8fb73816..ab274f476b4 100644 --- a/dashboard/dashboard/edit_site_config_test.py +++ b/dashboard/dashboard/edit_site_config_test.py @@ -6,9 +6,9 @@ from __future__ import division from __future__ import absolute_import +from flask import Flask import unittest -import webapp2 import webtest from google.appengine.api import users @@ -19,14 +19,24 @@ from dashboard.common import testing_common from dashboard.common import xsrf +flask_app = Flask(__name__) + + +@flask_app.route('/edit_site_config', methods=['GET']) +def EditSiteConfigHandlerGet(): + return edit_site_config.EditSiteConfigHandlerGet() + + +@flask_app.route('/edit_site_config', methods=['POST']) +def EditSiteConfigHandlerPost(): + return edit_site_config.EditSiteConfigHandlerPost() + class EditSiteConfigTest(testing_common.TestCase): def setUp(self): - super(EditSiteConfigTest, self).setUp() - app = webapp2.WSGIApplication([('/edit_site_config', - edit_site_config.EditSiteConfigHandler)]) - self.testapp = webtest.TestApp(app) + super().setUp() + self.testapp = webtest.TestApp(flask_app) testing_common.SetIsInternalUser('internal@chromium.org', True) testing_common.SetIsInternalUser('foo@chromium.org', False) self.SetCurrentUser('internal@chromium.org', is_admin=True) @@ -39,15 +49,15 @@ def testGet_WithNonNamespacedKey_ShowsPageWithCurrentValue(self): stored_object.Set('foo', 'XXXYYY') response = self.testapp.get('/edit_site_config?key=foo') self.assertEqual(1, len(response.html('form'))) - self.assertIn('XXXYYY', response.body) + self.assertIn(b'XXXYYY', response.body) def testGet_WithNamespacedKey_ShowsPageWithBothVersions(self): namespaced_stored_object.Set('foo', 'XXXYYY') namespaced_stored_object.SetExternal('foo', 'XXXinternalYYY') response = self.testapp.get('/edit_site_config?key=foo') self.assertEqual(1, len(response.html('form'))) - self.assertIn('XXXYYY', response.body) - self.assertIn('XXXinternalYYY', response.body) + self.assertIn(b'XXXYYY', response.body) + self.assertIn(b'XXXinternalYYY', response.body) def testPost_NoXsrfToken_ReturnsErrorStatus(self): self.testapp.post( @@ -64,7 +74,7 @@ def testPost_ExternalUser_ShowsErrorMessage(self): 'value': '[1, 2, 3]', 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), }) - self.assertIn('Only internal users', response.body) + self.assertIn(b'Only internal users', response.body) def testPost_WithKey_UpdatesNonNamespacedValues(self): self.testapp.post( @@ -83,7 +93,7 @@ def testPost_WithSomeInvalidJSON_ShowsErrorAndDoesNotModify(self): 'value': '[1, 2, this is not json', 'xsrf_token': xsrf.GenerateToken(users.get_current_user()), }) - self.assertIn('Invalid JSON', response.body) + self.assertIn(b'Invalid JSON', response.body) self.assertEqual('XXX', 
stored_object.Get('foo')) def testPost_WithKey_UpdatesNamespacedValues(self): @@ -112,27 +122,28 @@ def testPost_SendsNotificationEmail(self): messages = self.mail_stub.get_sent_messages() self.assertEqual(1, len(messages)) self.assertEqual('gasper-alerts@google.com', messages[0].sender) - self.assertEqual('chrome-performance-monitoring-alerts@google.com', + self.assertEqual('browser-perf-engprod@google.com', messages[0].to) self.assertEqual('Config "foo" changed by internal@chromium.org', messages[0].subject) + self.assertIn( 'Non-namespaced value diff:\n' ' null\n' '\n' 'Externally-visible value diff:\n' ' {\n' - '- "x": 10, \n' + '- "x": 10,\n' '? -\n' '\n' - '+ "x": 1, \n' + '+ "x": 1,\n' ' "y": 2\n' ' }\n' '\n' 'Internal-only value diff:\n' ' {\n' - ' "x": 1, \n' - '+ "y": 2, \n' + ' "x": 1,\n' + '+ "y": 2,\n' ' "z": 3\n' ' }\n', str(messages[0].body)) @@ -143,8 +154,9 @@ def testDiffJson_NoneToEmptyString(self): self.assertEqual('- null\n+ ""', edit_site_config._DiffJson(None, '')) def testDiffJson_AddListItem(self): - self.assertEqual(' [\n 1, \n+ 2, \n 3\n ]', - edit_site_config._DiffJson([1, 3], [1, 2, 3])) + self.assertEqual( + ' [\n 1,\n+ 2,\n 3\n ]', + edit_site_config._DiffJson([1, 3], [1, 2, 3]).replace(", ", ",")) if __name__ == '__main__': diff --git a/dashboard/dashboard/elements/alerts-page.html b/dashboard/dashboard/elements/alerts-page.html index ba50c143078..b9d84dba7ba 100644 --- a/dashboard/dashboard/elements/alerts-page.html +++ b/dashboard/dashboard/elements/alerts-page.html @@ -184,7 +184,7 @@
  [hunk content lost in extraction]
@@ -276,7 +276,7 @@ All alerts triaged!
         return;
       }
       const params = uri.getAllParameters();
-      const oldSheriff = params.sheriff || 'Chromium Perf Sheriff';
+      const oldSheriff = params.sheriff || '';
       if (oldSheriff === sheriff) {
         return;
       }
@@ -366,7 +366,7 @@ All alerts triaged!
       this.sortBy = uri.getParameter('sortby', 'end_revision');
       this.sortDirection = uri.getParameter('sortdirection', 'down');
       this.selectedSheriffIndex = this.sheriffList.indexOf(
-          uri.getParameter('sheriff', 'Chromium Perf Sheriff'));
+          uri.getParameter('sheriff', ''));
       this.showingImprovements = uri.getParameter('improvements', false);
       this.showingTriaged = uri.getParameter('triaged', false);
     },
@@ -391,12 +391,13 @@ All alerts triaged!
     },

     ready() {
-      this.sheriff = uri.getParameter('sheriff', 'Chromium Perf Sheriff');
+      this.sheriff = uri.getParameter('sheriff', null);
       this.showImprovements = uri.getParameter('improvements', false);
       this.showTriaged = uri.getParameter('triaged', false);
-      const params = {
-        'sheriff': this.sheriff
-      };
+      const params = {}
+      if (this.sheriff != null) {
+        params.sheriff = this.sheriff
+      }
       if (this.showImprovements) {
         params.improvements = true;
       }
diff --git a/dashboard/dashboard/elements/chart-container.html b/dashboard/dashboard/elements/chart-container.html
index 0fc1a0d3917..597beb2aeea 100644
--- a/dashboard/dashboard/elements/chart-container.html
+++ b/dashboard/dashboard/elements/chart-container.html
@@ -398,7 +398,7 @@
         },
         data: {},
         error_bars: {},
-        warning: null
+        warning: null,
       };
     }
   },
@@ -2264,6 +2264,16 @@
       this.$.tooltip.pointId = pointId;
       this.$.tooltip.revisions = this.getRevisions(
           jsonSeriesIndex, dataIndex);
+      if ('a_bot_id' in annotation) {
+        const botId = annotation.a_bot_id.sort().slice(0, 3)
+        if (annotation.a_bot_id.length > 3) {
+          botId.push(` and ${annotation.a_bot_id.length - 3} more bots`)
+        }
+        this.$.tooltip.botId = botId
+      }
+      if ('a_os_detail_vers' in annotation) {
+        this.$.tooltip.osDetailVers = annotation.a_os_detail_vers
+      }
       this.$.tooltip.links = this.getTooltipLinks(annotation,
           series.path, pointId);
       if (annotation.timestamp) {
@@ -2336,6 +2346,8 @@
       this.$.tooltip.alerts = null;
       this.$.tooltip.triagedAlerts = null;
       this.$.tooltip.bugId = null;
+      this.$.tooltip.botId = null;
+      this.$.tooltip.osDetailVers = null;
       this.$.tooltip.projectId = null;
       this.$.tooltip.alertKey = null;
       this.$.tooltip.recovered = null;
diff --git a/dashboard/dashboard/elements/chart-tooltip.html b/dashboard/dashboard/elements/chart-tooltip.html
index 8594d337d68..ddd91d2d74a 100644
--- a/dashboard/dashboard/elements/chart-tooltip.html
+++ b/dashboard/dashboard/elements/chart-tooltip.html
@@ -84,7 +84,9 @@
       Point ID: {{pointId}}
-      Time added: {{timestamp}}
+      Time added: {{timestamp}}
+      Test Bot(s): {{botId}}
+      OS Version: {{osDetailVers}}
  [remaining context lines lost in extraction]
diff --git a/dashboard/dashboard/elements/report-page.html b/dashboard/dashboard/elements/report-page.html
index eeb24d90b78..12e7736d821 100644
--- a/dashboard/dashboard/elements/report-page.html
+++ b/dashboard/dashboard/elements/report-page.html
@@ -33,7 +33,7 @@