From 9a8f29e571e8ac979fe917d76f36fb2ece34b6ea Mon Sep 17 00:00:00 2001 From: Alok Singh <8325708+alok@users.noreply.github.com> Date: Sat, 19 May 2018 16:07:28 -0700 Subject: [PATCH] YAPF, take 3 (#2098) * Use pep8 style The original style file is actually just pep8 style, but with everything spelled out. It's easier to use the `based_on_style` feature. Any overrides are clearer that way. * Improve yapf script 1. Do formatting in parallel 2. Lint RLlib 3. Use .style.yapf file * Pull out expressions into variables * Don't format rllib * Don't allow splits in dicts * Apply yapf * Disallow single line if-statements * Use arithmetic comparison * Simplify checking for changed files * Pull out expr into var --- .style.yapf | 191 +----------- .travis/yapf.sh | 40 +-- python/ray/actor.py | 40 +-- python/ray/autoscaler/autoscaler.py | 12 +- python/ray/autoscaler/aws/config.py | 31 +- python/ray/autoscaler/aws/node_provider.py | 9 +- python/ray/autoscaler/commands.py | 6 +- python/ray/experimental/state.py | 339 ++++++++------------- python/ray/monitor.py | 8 +- python/ray/tune/test/trial_runner_test.py | 6 +- python/ray/tune/trainable.py | 6 +- python/ray/worker.py | 4 +- python/setup.py | 5 +- test/runtest.py | 21 +- 14 files changed, 218 insertions(+), 500 deletions(-) diff --git a/.style.yapf b/.style.yapf index 8572636c2268b..b6a9c447ce675 100644 --- a/.style.yapf +++ b/.style.yapf @@ -1,189 +1,4 @@ [style] -# Align closing bracket with visual indentation. -align_closing_bracket_with_visual_indent=True - -# Allow dictionary keys to exist on multiple lines. For example: -# -# x = { -# ('this is the first element of a tuple', -# 'this is the second element of a tuple'): -# value, -# } -allow_multiline_dictionary_keys=False - -# Allow lambdas to be formatted on more than one line. -allow_multiline_lambdas=False - -# Insert a blank line before a class-level docstring. -blank_line_before_class_docstring=False - -# Insert a blank line before a 'def' or 'class' immediately nested -# within another 'def' or 'class'. For example: -# -# class Foo: -# # <------ this blank line -# def method(): -# ... -blank_line_before_nested_class_or_def=False - -# Do not split consecutive brackets. Only relevant when -# dedent_closing_brackets is set. For example: -# -# call_func_that_takes_a_dict( -# { -# 'key1': 'value1', -# 'key2': 'value2', -# } -# ) -# -# would reformat to: -# -# call_func_that_takes_a_dict({ -# 'key1': 'value1', -# 'key2': 'value2', -# }) -coalesce_brackets=True - -# The column limit. -column_limit=79 - -# Indent width used for line continuations. -continuation_indent_width=4 - -# Put closing brackets on a separate line, dedented, if the bracketed -# expression can't fit in a single line. Applies to all kinds of brackets, -# including function definitions and calls. For example: -# -# config = { -# 'key1': 'value1', -# 'key2': 'value2', -# } # <--- this bracket is dedented and on a separate line -# -# time_series = self.remote_client.query_entity_counters( -# entity='dev3246.region1', -# key='dns.query_latency_tcp', -# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), -# start_ts=now()-timedelta(days=3), -# end_ts=now(), -# ) # <--- this bracket is dedented and on a separate line -dedent_closing_brackets=False - -# Place each dictionary entry onto its own line. -each_dict_entry_on_separate_line=True - -# The regex for an i18n comment. The presence of this comment stops -# reformatting of that line, because the comments are required to be -# next to the string they translate. 
-i18n_comment= - -# The i18n function call names. The presence of this function stops -# reformattting on that line, because the string it has cannot be moved -# away from the i18n comment. -i18n_function_call= - -# Indent the dictionary value if it cannot fit on the same line as the -# dictionary key. For example: -# -# config = { -# 'key1': -# 'value1', -# 'key2': value1 + -# value2, -# } -indent_dictionary_value=True - -# The number of columns to use for indentation. -indent_width=4 - -# Join short lines into one line. E.g., single line 'if' statements. -join_multiple_lines=True - -# Do not include spaces around selected binary operators. For example: -# -# 1 + 2 * 3 - 4 / 5 -# -# will be formatted as follows when configured with a value "*,/": -# -# 1 + 2*3 - 4/5 -# -no_spaces_around_selected_binary_operators=set([]) - -# Use spaces around default or named assigns. -spaces_around_default_or_named_assign=False - -# Use spaces around the power operator. -spaces_around_power_operator=False - -# The number of spaces required before a trailing comment. -spaces_before_comment=2 - -# Insert a space between the ending comma and closing bracket of a list, -# etc. -space_between_ending_comma_and_closing_bracket=True - -# Split before arguments if the argument list is terminated by a -# comma. -split_arguments_when_comma_terminated=False - -# Set to True to prefer splitting before '&', '|' or '^' rather than -# after. -split_before_bitwise_operator=True - -# Split before a dictionary or set generator (comp_for). For example, note -# the split before the 'for': -# -# foo = { -# variable: 'Hello world, have a nice day!' -# for variable in bar if variable != 42 -# } -split_before_dict_set_generator=True - -# If an argument / parameter list is going to be split, then split before -# the first argument. -split_before_first_argument=False - -# Set to True to prefer splitting before 'and' or 'or' rather than -# after. -split_before_logical_operator=True - -# Split named assignments onto individual lines. -split_before_named_assigns=True - -# The penalty for splitting right after the opening bracket. -split_penalty_after_opening_bracket=30 - -# The penalty for splitting the line after a unary operator. -split_penalty_after_unary_operator=10000 - -# The penalty for splitting right before an if expression. -split_penalty_before_if_expr=0 - -# The penalty of splitting the line around the '&', '|', and '^' -# operators. -split_penalty_bitwise_operator=300 - -# The penalty for characters over the column limit. -split_penalty_excess_character=4500 - -# The penalty incurred by adding a line split to the unwrapped line. The -# more line splits added the higher the penalty. -split_penalty_for_added_line_split=30 - -# The penalty of splitting a list of "import as" names. For example: -# -# from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, -# long_argument_2, -# long_argument_3) -# -# would reformat to something like: -# -# from a_very_long_or_indented_module_name_yada_yad import ( -# long_argument_1, long_argument_2, long_argument_3) -split_penalty_import_names=0 - -# The penalty of splitting the line around the 'and' and 'or' -# operators. -split_penalty_logical_operator=300 - -# Use the Tab character for indentation. 
-use_tabs=False +based_on_style=pep8 +allow_split_before_dict_value=False +join_multiple_lines=False diff --git a/.travis/yapf.sh b/.travis/yapf.sh index b8af8656c0407..03828d6627d82 100755 --- a/.travis/yapf.sh +++ b/.travis/yapf.sh @@ -1,27 +1,29 @@ #!/usr/bin/env bash # Cause the script to exit if a single command fails -set -e +set -eo pipefail -ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd) +# this stops git rev-parse from failing if we run this from the .git directory +builtin cd "$(dirname "${BASH_SOURCE:-$0}")" -pushd $ROOT_DIR/../test - find . -name '*.py' -type f -exec yapf --style=pep8 -i -r {} \; -popd +ROOT="$(git rev-parse --show-toplevel)" +builtin cd "$ROOT" -pushd $ROOT_DIR/../python - find . -name '*.py' -type f -not -path './ray/dataframe/*' -not -path './ray/rllib/*' -not -path './ray/cloudpickle/*' -exec yapf --style=pep8 -i -r {} \; -popd +yapf \ + --style "$ROOT/.style.yapf" \ + --in-place --recursive --parallel \ + --exclude 'python/ray/cloudpickle' \ + --exclude 'python/ray/dataframe' \ + --exclude 'python/ray/rllib' \ + -- \ + 'test' 'python' -CHANGED_FILES=(`git diff --name-only`) -if [ "$CHANGED_FILES" ]; then - echo 'Reformatted staged files. Please review and stage the changes.' - echo - echo 'Files updated:' - for file in ${CHANGED_FILES[@]}; do - echo " $file" - done - exit 1 -else - exit 0 +if ! git diff --quiet; then + echo 'Reformatted staged files. Please review and stage the changes.' + echo 'Files updated:' + echo + + git --no-pager diff --name-only + + exit 1 fi diff --git a/python/ray/actor.py b/python/ray/actor.py index 1023e334d486d..9264a4fd053b6 100644 --- a/python/ray/actor.py +++ b/python/ray/actor.py @@ -857,32 +857,20 @@ def _serialization_helper(self, ray_forking): A dictionary of the information needed to reconstruct the object. """ state = { - "actor_id": - self._ray_actor_id.id(), - "class_name": - self._ray_class_name, - "actor_forks": - self._ray_actor_forks, - "actor_cursor": - self._ray_actor_cursor.id(), - "actor_counter": - 0, # Reset the actor counter. - "actor_method_names": - self._ray_actor_method_names, - "method_signatures": - self._ray_method_signatures, - "method_num_return_vals": - self._ray_method_num_return_vals, - "actor_creation_dummy_object_id": - self._ray_actor_creation_dummy_object_id.id(), - "actor_method_cpus": - self._ray_actor_method_cpus, - "actor_driver_id": - self._ray_actor_driver_id.id(), - "previous_actor_handle_id": - self._ray_actor_handle_id.id(), - "ray_forking": - ray_forking + "actor_id": self._ray_actor_id.id(), + "class_name": self._ray_class_name, + "actor_forks": self._ray_actor_forks, + "actor_cursor": self._ray_actor_cursor.id(), + "actor_counter": 0, # Reset the actor counter. + "actor_method_names": self._ray_actor_method_names, + "method_signatures": self._ray_method_signatures, + "method_num_return_vals": self._ray_method_num_return_vals, + "actor_creation_dummy_object_id": self. 
+ _ray_actor_creation_dummy_object_id.id(), + "actor_method_cpus": self._ray_actor_method_cpus, + "actor_driver_id": self._ray_actor_driver_id.id(), + "previous_actor_handle_id": self._ray_actor_handle_id.id(), + "ray_forking": ray_forking } if ray_forking: diff --git a/python/ray/autoscaler/autoscaler.py b/python/ray/autoscaler/autoscaler.py index 502b4c6c783ed..a3f64c2d2d33a 100644 --- a/python/ray/autoscaler/autoscaler.py +++ b/python/ray/autoscaler/autoscaler.py @@ -182,19 +182,15 @@ def _info(self): nodes_used += max_frac idle_times = [now - t for t in self.last_used_time_by_ip.values()] return { - "ResourceUsage": - ", ".join([ + "ResourceUsage": ", ".join([ "{}/{} {}".format( round(resources_used[rid], 2), round(resources_total[rid], 2), rid) for rid in sorted(resources_used) ]), - "NumNodesConnected": - len(self.static_resources_by_ip), - "NumNodesUsed": - round(nodes_used, 2), - "NodeIdleSeconds": - "Min={} Mean={} Max={}".format( + "NumNodesConnected": len(self.static_resources_by_ip), + "NumNodesUsed": round(nodes_used, 2), + "NodeIdleSeconds": "Min={} Mean={} Max={}".format( int(np.min(idle_times)) if idle_times else -1, int(np.mean(idle_times)) if idle_times else -1, int(np.max(idle_times)) if idle_times else -1), diff --git a/python/ray/autoscaler/aws/config.py b/python/ray/autoscaler/aws/config.py index 3e3facdcf86f0..0c9715b10df28 100644 --- a/python/ray/autoscaler/aws/config.py +++ b/python/ray/autoscaler/aws/config.py @@ -209,22 +209,21 @@ def _configure_security_group(config): assert security_group, "Failed to create security group" if not security_group.ip_permissions: - security_group.authorize_ingress( - IpPermissions=[{ - "FromPort": -1, - "ToPort": -1, - "IpProtocol": "-1", - "UserIdGroupPairs": [{ - "GroupId": security_group.id - }] - }, { - "FromPort": 22, - "ToPort": 22, - "IpProtocol": "TCP", - "IpRanges": [{ - "CidrIp": "0.0.0.0/0" - }] - }]) + security_group.authorize_ingress(IpPermissions=[{ + "FromPort": -1, + "ToPort": -1, + "IpProtocol": "-1", + "UserIdGroupPairs": [{ + "GroupId": security_group.id + }] + }, { + "FromPort": 22, + "ToPort": 22, + "IpProtocol": "TCP", + "IpRanges": [{ + "CidrIp": "0.0.0.0/0" + }] + }]) if "SecurityGroupIds" not in config["head_node"]: print("SecurityGroupIds not specified for head node, using {}".format( diff --git a/python/ray/autoscaler/aws/node_provider.py b/python/ray/autoscaler/aws/node_provider.py index a31d3e51b8548..cec58fa8f39e1 100644 --- a/python/ray/autoscaler/aws/node_provider.py +++ b/python/ray/autoscaler/aws/node_provider.py @@ -101,12 +101,9 @@ def create_node(self, node_config, tags, count): "Value": v, }) conf.update({ - "MinCount": - 1, - "MaxCount": - count, - "TagSpecifications": - conf.get("TagSpecifications", []) + [{ + "MinCount": 1, + "MaxCount": count, + "TagSpecifications": conf.get("TagSpecifications", []) + [{ "ResourceType": "instance", "Tags": tag_pairs, }] diff --git a/python/ray/autoscaler/commands.py b/python/ray/autoscaler/commands.py index 5ccc8eaac60b6..f38e99ff4ef54 100644 --- a/python/ray/autoscaler/commands.py +++ b/python/ray/autoscaler/commands.py @@ -128,10 +128,8 @@ def get_or_create_head_node(config, no_restart, yes): remote_config_file.write(json.dumps(remote_config)) remote_config_file.flush() config["file_mounts"].update({ - remote_key_path: - config["auth"]["ssh_private_key"], - "~/ray_bootstrap_config.yaml": - remote_config_file.name + remote_key_path: config["auth"]["ssh_private_key"], + "~/ray_bootstrap_config.yaml": remote_config_file.name }) if no_restart: diff --git 
a/python/ray/experimental/state.py b/python/ray/experimental/state.py index 3ded77401b206..f5e020ba1553e 100644 --- a/python/ray/experimental/state.py +++ b/python/ray/experimental/state.py @@ -267,30 +267,20 @@ def _task_table(self, task_id): task_spec = ray.local_scheduler.task_from_string(task_spec) task_spec_info = { - "DriverID": - binary_to_hex(task_spec.driver_id().id()), - "TaskID": - binary_to_hex(task_spec.task_id().id()), - "ParentTaskID": - binary_to_hex(task_spec.parent_task_id().id()), - "ParentCounter": - task_spec.parent_counter(), - "ActorID": - binary_to_hex(task_spec.actor_id().id()), - "ActorCreationID": - binary_to_hex(task_spec.actor_creation_id().id()), - "ActorCreationDummyObjectID": - binary_to_hex(task_spec.actor_creation_dummy_object_id().id()), - "ActorCounter": - task_spec.actor_counter(), - "FunctionID": - binary_to_hex(task_spec.function_id().id()), - "Args": - task_spec.arguments(), - "ReturnObjectIDs": - task_spec.returns(), - "RequiredResources": - task_spec.required_resources() + "DriverID": binary_to_hex(task_spec.driver_id().id()), + "TaskID": binary_to_hex(task_spec.task_id().id()), + "ParentTaskID": binary_to_hex(task_spec.parent_task_id().id()), + "ParentCounter": task_spec.parent_counter(), + "ActorID": binary_to_hex(task_spec.actor_id().id()), + "ActorCreationID": binary_to_hex( + task_spec.actor_creation_id().id()), + "ActorCreationDummyObjectID": binary_to_hex( + task_spec.actor_creation_dummy_object_id().id()), + "ActorCounter": task_spec.actor_counter(), + "FunctionID": binary_to_hex(task_spec.function_id().id()), + "Args": task_spec.arguments(), + "ReturnObjectIDs": task_spec.returns(), + "RequiredResources": task_spec.required_resources() } execution_dependencies_message = ( @@ -308,18 +298,14 @@ def _task_table(self, task_id): # ExecutionDependencies. However, it is currently used in monitor.py. return { - "State": - task_table_message.State(), - "LocalSchedulerID": - binary_to_hex(task_table_message.LocalSchedulerId()), - "ExecutionDependenciesString": - task_table_message.ExecutionDependencies(), - "ExecutionDependencies": - execution_dependencies, - "SpillbackCount": - task_table_message.SpillbackCount(), - "TaskSpec": - task_spec_info + "State": task_table_message.State(), + "LocalSchedulerID": binary_to_hex( + task_table_message.LocalSchedulerId()), + "ExecutionDependenciesString": task_table_message. 
+ ExecutionDependencies(), + "ExecutionDependencies": execution_dependencies, + "SpillbackCount": task_table_message.SpillbackCount(), + "TaskSpec": task_spec_info } def task_table(self, task_id=None): @@ -652,78 +638,49 @@ def micros_rel(ts): if breakdowns: if "get_arguments_end" in info: get_args_trace = { - "cat": - "get_arguments", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "id": - task_id, - "ts": - micros_rel(info["get_arguments_start"]), - "ph": - "X", - "name": - info["function_name"] + ":get_arguments", - "args": - total_info, - "dur": - micros(info["get_arguments_end"] - - info["get_arguments_start"]), - "cname": - "rail_idle" + "cat": "get_arguments", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "id": task_id, + "ts": micros_rel(info["get_arguments_start"]), + "ph": "X", + "name": info["function_name"] + ":get_arguments", + "args": total_info, + "dur": micros(info["get_arguments_end"] - + info["get_arguments_start"]), + "cname": "rail_idle" } full_trace.append(get_args_trace) if "store_outputs_end" in info: outputs_trace = { - "cat": - "store_outputs", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "id": - task_id, - "ts": - micros_rel(info["store_outputs_start"]), - "ph": - "X", - "name": - info["function_name"] + ":store_outputs", - "args": - total_info, - "dur": - micros(info["store_outputs_end"] - - info["store_outputs_start"]), - "cname": - "thread_state_runnable" + "cat": "store_outputs", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "id": task_id, + "ts": micros_rel(info["store_outputs_start"]), + "ph": "X", + "name": info["function_name"] + ":store_outputs", + "args": total_info, + "dur": micros(info["store_outputs_end"] - + info["store_outputs_start"]), + "cname": "thread_state_runnable" } full_trace.append(outputs_trace) if "execute_end" in info: execute_trace = { - "cat": - "execute", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "id": - task_id, - "ts": - micros_rel(info["execute_start"]), - "ph": - "X", - "name": - info["function_name"] + ":execute", - "args": - total_info, - "dur": - micros(info["execute_end"] - info["execute_start"]), - "cname": - "rail_animation" + "cat": "execute", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "id": task_id, + "ts": micros_rel(info["execute_start"]), + "ph": "X", + "name": info["function_name"] + ":execute", + "args": total_info, + "dur": micros(info["execute_end"] - + info["execute_start"]), + "cname": "rail_animation" } full_trace.append(execute_trace) @@ -733,72 +690,53 @@ def micros_rel(ts): parent_times = self._get_times(parent_info) parent_profile = task_info.get( task_table[task_id]["TaskSpec"]["ParentTaskID"]) + + _parent_id = parent_info["worker_id"] + str( + micros(min(parent_times))) + parent = { - "cat": - "submit_task", - "pid": - "Node " + parent_worker["node_ip_address"], - "tid": - parent_info["worker_id"], - "ts": - micros_rel(parent_profile - and parent_profile["get_arguments_start"] - or start_time), - "ph": - "s", - "name": - "SubmitTask", + "cat": "submit_task", + "pid": "Node " + parent_worker["node_ip_address"], + "tid": parent_info["worker_id"], + "ts": micros_rel( + parent_profile + and parent_profile["get_arguments_start"] + or start_time), + "ph": "s", + "name": "SubmitTask", "args": {}, - "id": (parent_info["worker_id"] + str( - micros(min(parent_times)))) + "id": _parent_id, } full_trace.append(parent) + _id 
= info["worker_id"] + str(micros(min(parent_times))) + task_trace = { - "cat": - "submit_task", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "ts": - micros_rel(info["get_arguments_start"]), - "ph": - "f", - "name": - "SubmitTask", + "cat": "submit_task", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "ts": micros_rel(info["get_arguments_start"]), + "ph": "f", + "name": "SubmitTask", "args": {}, - "id": - (info["worker_id"] + str(micros(min(parent_times)))), - "bp": - "e", - "cname": - "olive" + "id": _id, + "bp": "e", + "cname": "olive" } full_trace.append(task_trace) task = { - "cat": - "task", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "id": - task_id, - "ts": - micros_rel(info["get_arguments_start"]), - "ph": - "X", - "name": - info["function_name"], - "args": - total_info, - "dur": - micros(info["store_outputs_end"] - - info["get_arguments_start"]), - "cname": - "thread_state_runnable" + "cat": "task", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "id": task_id, + "ts": micros_rel(info["get_arguments_start"]), + "ph": "X", + "name": info["function_name"], + "args": total_info, + "dur": micros(info["store_outputs_end"] - + info["get_arguments_start"]), + "cname": "thread_state_runnable" } full_trace.append(task) @@ -808,45 +746,37 @@ def micros_rel(ts): parent_times = self._get_times(parent_info) parent_profile = task_info.get( task_table[task_id]["TaskSpec"]["ParentTaskID"]) + + _parent_id = parent_info["worker_id"] + str( + micros(min(parent_times))) + parent = { - "cat": - "submit_task", - "pid": - "Node " + parent_worker["node_ip_address"], - "tid": - parent_info["worker_id"], - "ts": - micros_rel(parent_profile - and parent_profile["get_arguments_start"] - or start_time), - "ph": - "s", - "name": - "SubmitTask", + "cat": "submit_task", + "pid": "Node " + parent_worker["node_ip_address"], + "tid": parent_info["worker_id"], + "ts": micros_rel( + parent_profile + and parent_profile["get_arguments_start"] + or start_time), + "ph": "s", + "name": "SubmitTask", "args": {}, - "id": (parent_info["worker_id"] + str( - micros(min(parent_times)))) + "id": _parent_id, } full_trace.append(parent) + _id = info["worker_id"] + str(micros(min(parent_times))) + task_trace = { - "cat": - "submit_task", - "pid": - "Node " + worker["node_ip_address"], - "tid": - info["worker_id"], - "ts": - micros_rel(info["get_arguments_start"]), - "ph": - "f", - "name": - "SubmitTask", + "cat": "submit_task", + "pid": "Node " + worker["node_ip_address"], + "tid": info["worker_id"], + "ts": micros_rel(info["get_arguments_start"]), + "ph": "f", + "name": "SubmitTask", "args": {}, - "id": - (info["worker_id"] + str(micros(min(parent_times)))), - "bp": - "e" + "id": _id, + "bp": "e" } full_trace.append(task_trace) @@ -879,26 +809,19 @@ def micros_rel(ts): # duration event that it's associated with, and # the flow event therefore always gets drawn. 
owner = { - "cat": - "obj_dependency", + "cat": "obj_dependency", "pid": ("Node " + owner_worker["node_ip_address"]), - "tid": - task_info[owner_task]["worker_id"], - "ts": - micros_rel(task_info[owner_task] - ["store_outputs_end"]) - 2, - "ph": - "s", - "name": - "ObjectDependency", + "tid": task_info[owner_task]["worker_id"], + "ts": micros_rel(task_info[owner_task] + ["store_outputs_end"]) - + 2, + "ph": "s", + "name": "ObjectDependency", "args": {}, - "bp": - "e", - "cname": - "cq_build_attempt_failed", - "id": - "obj" + str(arg) + str(seen_obj[arg]) + "bp": "e", + "cname": "cq_build_attempt_failed", + "id": "obj" + str(arg) + str(seen_obj[arg]) } full_trace.append(owner) @@ -906,8 +829,8 @@ def micros_rel(ts): "cat": "obj_dependency", "pid": "Node " + worker["node_ip_address"], "tid": info["worker_id"], - "ts": - micros_rel(info["get_arguments_start"]) + 2, + "ts": micros_rel(info["get_arguments_start"]) + + 2, "ph": "f", "name": "ObjectDependency", "args": {}, @@ -985,8 +908,8 @@ def workers(self): worker_id = binary_to_hex(worker_key[len("Workers:"):]) workers_data[worker_id] = { - "local_scheduler_socket": - (worker_info[b"local_scheduler_socket"].decode("ascii")), + "local_scheduler_socket": ( + worker_info[b"local_scheduler_socket"].decode("ascii")), "node_ip_address": (worker_info[b"node_ip_address"] .decode("ascii")), "plasma_manager_socket": (worker_info[b"plasma_manager_socket"] @@ -1012,8 +935,8 @@ def actors(self): actor_info[binary_to_hex(actor_id)] = { "class_id": binary_to_hex(info[b"class_id"]), "driver_id": binary_to_hex(info[b"driver_id"]), - "local_scheduler_id": - binary_to_hex(info[b"local_scheduler_id"]), + "local_scheduler_id": binary_to_hex( + info[b"local_scheduler_id"]), "num_gpus": int(info[b"num_gpus"]), "removed": decode(info[b"removed"]) == "True" } diff --git a/python/ray/monitor.py b/python/ray/monitor.py index 734daf3642582..c628a279ae058 100644 --- a/python/ray/monitor.py +++ b/python/ray/monitor.py @@ -503,11 +503,13 @@ def run(self): self.cleanup_task_table() if len(self.dead_plasma_managers) > 0: self.cleanup_object_table() + + num_plasma_managers = len(self.live_plasma_managers) + len( + self.dead_plasma_managers) + log.debug("{} dead local schedulers, {} plasma managers total, {} " "dead plasma managers".format( - len(self.dead_local_schedulers), - (len(self.live_plasma_managers) + len( - self.dead_plasma_managers)), + len(self.dead_local_schedulers), num_plasma_managers, len(self.dead_plasma_managers))) # Handle messages from the subscription channels. 
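The monitor.py and state.py hunks above illustrate the "Pull out expressions into variables" bullet from the commit message: a nested expression that yapf would otherwise wrap into a deeply indented argument list is bound to a short name first, then passed to the call. The sketch below is illustrative only; it is not part of the patch, and the surrounding values are simplified stand-ins rather than Ray's real monitor state.

    import logging

    log = logging.getLogger(__name__)
    dead_local_schedulers = ["ls1"]
    live_plasma_managers = ["pm1", "pm2"]
    dead_plasma_managers = ["pm3"]

    # Before: the nested len(...) + len(...) expression forces yapf to wrap
    # a deeply indented argument list inside the format() call.
    log.debug("{} dead local schedulers, {} plasma managers total".format(
        len(dead_local_schedulers),
        len(live_plasma_managers) + len(dead_plasma_managers)))

    # After ("Pull out expressions into variables"): bind the expression to
    # a variable first, so the wrapped call stays short and readable.
    num_plasma_managers = len(live_plasma_managers) + len(dead_plasma_managers)
    log.debug("{} dead local schedulers, {} plasma managers total".format(
        len(dead_local_schedulers), num_plasma_managers))

The same pattern appears as the `_parent_id` and `_id` assignments added in the state.py hunk above.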
diff --git a/python/ray/tune/test/trial_runner_test.py b/python/ray/tune/test/trial_runner_test.py index 2eba2693d1079..f762b48487283 100644 --- a/python/ray/tune/test/trial_runner_test.py +++ b/python/ray/tune/test/trial_runner_test.py @@ -526,13 +526,11 @@ def testDependentGridSearch(self): trials = generate_trials({ "run": "PPO", "config": { - "x": - grid_search([ + "x": grid_search([ lambda spec: spec.config.y * 100, lambda spec: spec.config.y * 200 ]), - "y": - lambda spec: 1, + "y": lambda spec: 1, }, }) trials = list(trials) diff --git a/python/ray/tune/trainable.py b/python/ray/tune/trainable.py index a801b89f58864..f0351e1288344 100644 --- a/python/ray/tune/trainable.py +++ b/python/ray/tune/trainable.py @@ -203,10 +203,8 @@ def save_to_object(self): out = io.BytesIO() with gzip.GzipFile(fileobj=out, mode="wb") as f: compressed = pickle.dumps({ - "checkpoint_name": - os.path.basename(checkpoint_prefix), - "data": - data, + "checkpoint_name": os.path.basename(checkpoint_prefix), + "data": data, }) if len(compressed) > 10e6: # getting pretty large print("Checkpoint size is {} bytes".format(len(compressed))) diff --git a/python/ray/worker.py b/python/ray/worker.py index ccc0028f1d234..230265898016f 100644 --- a/python/ray/worker.py +++ b/python/ray/worker.py @@ -1558,8 +1558,8 @@ def _init(address_info=None, driver_address_info = { "node_ip_address": node_ip_address, "redis_address": address_info["redis_address"], - "store_socket_name": - (address_info["object_store_addresses"][0].name), + "store_socket_name": ( + address_info["object_store_addresses"][0].name), "webui_url": address_info["webui_url"] } if not use_raylet: diff --git a/python/setup.py b/python/setup.py index 5ef7e4149a28b..8f71376e68fb0 100644 --- a/python/setup.py +++ b/python/setup.py @@ -52,8 +52,9 @@ optional_ray_files += ray_autoscaler_files extras = { - "rllib": - ["tensorflow", "pyyaml", "gym[atari]", "opencv-python", "lz4", "scipy"] + "rllib": [ + "tensorflow", "pyyaml", "gym[atari]", "opencv-python", "lz4", "scipy" + ] } diff --git a/test/runtest.py b/test/runtest.py index b5923c7e96090..9e407c72329b2 100644 --- a/test/runtest.py +++ b/test/runtest.py @@ -180,14 +180,15 @@ class CustomError(Exception): TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS] # The check that type(obj).__module__ != "numpy" should be unnecessary, but # otherwise this seems to fail on Mac OS X on Travis. -DICT_OBJECTS = ([{ - obj: obj -} for obj in PRIMITIVE_OBJECTS if ( - obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{ - 0: obj - } for obj in BASE_OBJECTS] + [{ - Foo(123): Foo(456) - }]) +DICT_OBJECTS = ( + [{ + obj: obj + } for obj in PRIMITIVE_OBJECTS + if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{ + 0: obj + } for obj in BASE_OBJECTS] + [{ + Foo(123): Foo(456) + }]) RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS @@ -720,8 +721,8 @@ def g(): assert ray.get([id1, id2, id3]) == [0, 1, 2] assert ray.get( g._submit( - args=[], num_cpus=1, num_gpus=1, resources={"Custom": - 1})) == [0] + args=[], num_cpus=1, num_gpus=1, + resources={"Custom": 1})) == [0] infeasible_id = g._submit(args=[], resources={"NonexistentCustom": 1}) ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=50) assert len(ready_ids) == 0
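Taken together, the new three-line `.style.yapf` at the top of the patch (`based_on_style=pep8` plus `allow_split_before_dict_value=False` and `join_multiple_lines=False`) is what produces the dictionary and if-statement reformatting shown throughout the diffs above. The following is a minimal sketch of exercising that style through yapf's Python API; it assumes yapf is installed and is run from the repository root so the `.style.yapf` path resolves, and the exact return type of `FormatCode` has varied across yapf releases.

    # Minimal sketch: format a snippet under the repository's new style file.
    # Assumes yapf is installed and the script runs from the repo root; the
    # (formatted, changed) tuple is yapf's historical return value and may
    # differ in newer releases.
    from yapf.yapflib.yapf_api import FormatCode

    source = (
        "state = {'actor_id': actor_id.id(),"
        " 'previous_actor_handle_id': handle_id.id()}\n"
        "if ray_forking: actor_counter = 0\n"
    )

    result = FormatCode(source, style_config=".style.yapf")
    formatted = result[0] if isinstance(result, tuple) else result

    # Expected effect of the two overrides: allow_split_before_dict_value
    # keeps each value on the same line as its key (wrapping happens inside
    # the value instead), and join_multiple_lines=False expands the
    # single-line if-statement onto two lines.
    print(formatted)

Running yapf this way on a small snippet is a quick check that a style change behaves as intended before reformatting the whole tree with `.travis/yapf.sh`.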