From 33702a07bd5e6c635458fd9e2b912a06fa52a8cb Mon Sep 17 00:00:00 2001 From: refai06 Date: Fri, 7 Feb 2025 22:08:23 +0530 Subject: [PATCH 1/7] Experimental-export-module-refactoring Signed-off-by: refai06 --- ...kspace_Creation_from_JupyterNotebook.ipynb | 10 +- .../TwoPartyWorkspaceCreation.ipynb | 8 +- .../workflow/interface/cli/workspace.py | 4 +- .../workflow/notebooktools/__init__.py | 8 + .../code_analyzer.py} | 442 +++++++----------- .../workflow/notebooktools/notebook_tools.py | 253 ++++++++++ .../workflow/runtime/federated_runtime.py | 6 +- .../workflow/workspace_export/__init__.py | 5 - 8 files changed, 449 insertions(+), 287 deletions(-) create mode 100644 openfl/experimental/workflow/notebooktools/__init__.py rename openfl/experimental/workflow/{workspace_export/export.py => notebooktools/code_analyzer.py} (51%) create mode 100644 openfl/experimental/workflow/notebooktools/notebook_tools.py delete mode 100644 openfl/experimental/workflow/workspace_export/__init__.py diff --git a/openfl-tutorials/experimental/workflow/1001_Workspace_Creation_from_JupyterNotebook.ipynb b/openfl-tutorials/experimental/workflow/1001_Workspace_Creation_from_JupyterNotebook.ipynb index ab31b26d50..525365a2a9 100644 --- a/openfl-tutorials/experimental/workflow/1001_Workspace_Creation_from_JupyterNotebook.ipynb +++ b/openfl-tutorials/experimental/workflow/1001_Workspace_Creation_from_JupyterNotebook.ipynb @@ -22,7 +22,7 @@ "##### High Level Overview of Methodology\n", "1. User annotates the relevant cells of the Jupyter notebook with `#| export` directive\n", "2. We then Leverage `nbdev` functionality to export these annotated cells of Jupyter notebook into a Python script\n", - "3. Utilize OpenFL experimental workflow module `WorkspaceExport` to convert the Python script into a OpenFL workspace\n", + "3. Utilize OpenFL experimental workflow module `NotebookTools` to convert the Python script into a OpenFL workspace\n", "4. 
User can utilize the experimental `fx` commands to deploy and run the federation seamlessly\n", "\n", "\n", @@ -946,7 +946,7 @@ "The following cells convert the Jupyter notebook into a Python script and create a Template Workspace that can be utilized by Aggregator based Workflow\n", "> NOTE: Only Notebook cells that were marked with `#| export` directive shall be included in this Python script\n", "\n", - "We first import `WorkspaceExport` module and execute `WorkspaceExport.export()` that converts the notebook and generates the template workspace. User is required to specify: \n", + "We first import `NotebookTools` module and execute `NotebookTools.export()` that converts the notebook and generates the template workspace. User is required to specify: \n", "1. `notebook_path`: path of the Jupyter notebook that is required to be converted\n", "2. `output_workspace`: path where the converted workspace is stored" ] @@ -959,9 +959,9 @@ "outputs": [], "source": [ "import os\n", - "from openfl.experimental.workflow.workspace_export import WorkspaceExport\n", + "from openfl.experimental.workflow.notebooktools import NotebookTools\n", "\n", - "WorkspaceExport.export(\n", + "NotebookTools.export(\n", " notebook_path='./1001_Workspace_Creation_from_JupyterNotebook.ipynb',\n", " output_workspace=f\"/home/{os.environ['USER']}/generated-workspace\"\n", ")" @@ -1065,7 +1065,7 @@ ], "metadata": { "kernelspec": { - "display_name": "fed_run", + "display_name": "dir_workspace_3.10", "language": "python", "name": "python3" }, diff --git a/openfl-tutorials/experimental/workflow/Vertical_FL/TwoPartyWorkspaceCreation.ipynb b/openfl-tutorials/experimental/workflow/Vertical_FL/TwoPartyWorkspaceCreation.ipynb index 1395a5095a..1cfc63c5f3 100644 --- a/openfl-tutorials/experimental/workflow/Vertical_FL/TwoPartyWorkspaceCreation.ipynb +++ b/openfl-tutorials/experimental/workflow/Vertical_FL/TwoPartyWorkspaceCreation.ipynb @@ -20,7 +20,7 @@ "##### High Level Overview of Methodology\n", "1. 
User annotates the relevant cells of the Jupyter notebook with `#| export` directive\n", "2. We then Leverage `nbdev` functionality to export these annotated cells of Jupyter notebook into a Python script\n", - "3. Utilize OpenFL experimental workflow module `WorkspaceExport` to convert the Python script into a OpenFL workspace\n", + "3. Utilize OpenFL experimental workflow module `NotebookTools` to convert the Python script into a OpenFL workspace\n", "4. User can utilize the experimental `fx` commands to deploy and run the federation seamlessly\n", "\n", "\n", @@ -340,7 +340,7 @@ "The following cells convert the Jupyter notebook into a Python script and create a Template Workspace that can be utilized by Aggregator based Workflow\n", "> NOTE: Only Notebook cells that were marked with `#| export` directive shall be included in this Python script\n", "\n", - "We first import `WorkspaceExport` module and execute `WorkspaceExport.export()` that converts the notebook and generates the template workspace. User is required to specify: \n", + "We first import `NotebookTools` module and execute `NotebookTools.export()` that converts the notebook and generates the template workspace. User is required to specify: \n", "1. `notebook_path`: path of the Jupyter notebook that is required to be converted\n", "2. 
`output_workspace`: path where the converted workspace is stored" ] @@ -353,9 +353,9 @@ "outputs": [], "source": [ "import os\n", - "from openfl.experimental.workflow.workspace_export import WorkspaceExport\n", + "from openfl.experimental.workflow.notebooktools import NotebookTools\n", "\n", - "WorkspaceExport.export(\n", + "NotebookTools.export(\n", " notebook_path='./TwoPartyWorkspaceCreation.ipynb',\n", " output_workspace=f\"/home/{os.environ['USER']}/generated-workspace\"\n", ")" diff --git a/openfl/experimental/workflow/interface/cli/workspace.py b/openfl/experimental/workflow/interface/cli/workspace.py index fb38786b46..07e3e4a5e4 100644 --- a/openfl/experimental/workflow/interface/cli/workspace.py +++ b/openfl/experimental/workflow/interface/cli/workspace.py @@ -30,7 +30,7 @@ print_tree, ) from openfl.experimental.workflow.interface.cli.plan import freeze_plan -from openfl.experimental.workflow.workspace_export import WorkspaceExport +from openfl.experimental.workflow.notebooktools import NotebookTools from openfl.utilities.path_check import is_directory_traversal from openfl.utilities.utils import rmtree from openfl.utilities.workspace import dump_requirements_file @@ -138,7 +138,7 @@ def create_(prefix, custom_template, template, notebook, template_output_dir): + "save your Jupyter Notebook workspace." 
) - WorkspaceExport.export( + NotebookTools.export( notebook_path=notebook, output_workspace=template_output_dir, ) diff --git a/openfl/experimental/workflow/notebooktools/__init__.py b/openfl/experimental/workflow/notebooktools/__init__.py new file mode 100644 index 0000000000..41d1e84ced --- /dev/null +++ b/openfl/experimental/workflow/notebooktools/__init__.py @@ -0,0 +1,8 @@ +# Copyright 2020-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + + +"""openfl.experimental.workflow.notebooktools package.""" + +from openfl.experimental.workflow.notebooktools.code_analyzer import CodeAnalyzer +from openfl.experimental.workflow.notebooktools.notebook_tools import NotebookTools diff --git a/openfl/experimental/workflow/workspace_export/export.py b/openfl/experimental/workflow/notebooktools/code_analyzer.py similarity index 51% rename from openfl/experimental/workflow/workspace_export/export.py rename to openfl/experimental/workflow/notebooktools/code_analyzer.py index 14750de2ff..5c559ecfd3 100644 --- a/openfl/experimental/workflow/workspace_export/export.py +++ b/openfl/experimental/workflow/notebooktools/code_analyzer.py @@ -1,82 +1,43 @@ # Copyright 2020-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 - -"""Workspace Export module.""" - import ast -import importlib import inspect import re -import shutil import sys +from importlib import import_module from logging import getLogger from pathlib import Path -from shutil import copytree -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple import nbformat -import yaml from nbdev.export import nb_export -from openfl.experimental.workflow.federated.plan import Plan -from openfl.experimental.workflow.interface.cli.cli_helper import print_tree - logger = getLogger(__name__) -class WorkspaceExport: - """Convert a LocalRuntime Jupyter Notebook to Aggregator based - FederatedRuntime Workflow. 
+class CodeAnalyzer: + """Code transforamtion and analysis functionality for NotebookTools Attributes: - notebook_path: Absolute path of jupyter notebook. - template_workspace_path: Path to template workspace provided with - OpenFL. - output_workspace_path: Output directory for new generated workspace - (default="/tmp"). + script_path: Absoluet path to python script. + script_name: Name of the python script. """ - def __init__(self, notebook_path: str, output_workspace: str) -> None: - """Initialize a WorkspaceExport object. + def __init__(self) -> None: + """Initialize CodeTransformer""" + self.script_path = None + self.script_name = None + + def _initialize_script(self, notebook_path: Path, output_path: Path) -> None: + """Initialize and process the script from notebook Args: - notebook_path (str): Path to Jupyter notebook. - output_workspace (str): Path to output_workspace to be - generated. + notebook_path (str): Path to Jupyter notebook. + output_workspace (str): Path to output_workspace to be + generated. 
""" - - self.notebook_path = Path(notebook_path).resolve() - # Check if the Jupyter notebook exists - if not self.notebook_path.exists() or not self.notebook_path.is_file(): - raise FileNotFoundError(f"The Jupyter notebook at {notebook_path} does not exist.") - - self.output_workspace_path = Path(output_workspace).resolve() - # Regenerate the workspace if it already exists - if self.output_workspace_path.exists(): - shutil.rmtree(self.output_workspace_path) - self.output_workspace_path.parent.mkdir(parents=True, exist_ok=True) - - self.template_workspace_path = ( - Path(f"{__file__}") - .parent.parent.parent.parent.parent.joinpath( - "openfl-workspace", - "experimental", - "workflow", - "AggregatorBasedWorkflow", - "template_workspace", - ) - .resolve(strict=True) - ) - - # Copy template workspace to output directory - self.created_workspace_path = Path( - copytree(self.template_workspace_path, self.output_workspace_path) - ) - logger.info(f"Copied template workspace to {self.created_workspace_path}") - - logger.info("Converting jupter notebook to python script...") - export_filename = self.__get_exp_name() + export_filename = self.__get_exp_name(notebook_path) if export_filename is None: raise NameError( "Please include `#| default_exp ` in " @@ -84,24 +45,19 @@ def __init__(self, notebook_path: str, output_workspace: str) -> None: ) self.script_path = Path( self.__convert_to_python( - self.notebook_path, - self.created_workspace_path.joinpath("src"), + notebook_path, + output_path.joinpath("src"), f"{export_filename}.py", ) ).resolve() - - # Generated python script name without .py extension self.script_name = self.script_path.name.split(".")[0].strip() - # Comment flow.run() so when script is imported flow does not start - # executing - self.__comment_flow_execution() - # This is required as Ray created actors too many actors when - # backend="ray" # NOQA - self.__change_runtime() - - def __get_exp_name(self) -> None: - """Fetch the experiment name from the 
Jupyter notebook.""" - with open(str(self.notebook_path), "r") as f: + + def __get_exp_name(self, notebook_path: Path) -> None: + """Fetch the experiment name from the Jupyter notebook. + Args: + notebook_path (str): Path to Jupyter notebook. + """ + with open(str(notebook_path), "r") as f: notebook_content = nbformat.read(f, as_version=nbformat.NO_CONVERT) for cell in notebook_content.cells: @@ -115,46 +71,41 @@ def __get_exp_name(self) -> None: def __convert_to_python(self, notebook_path: Path, output_path: Path, export_filename) -> Path: """Converts a Jupyter notebook to a Python script. - Args: notebook_path (Path): The path to the Jupyter notebook file to be converted. output_path (Path): The directory where the exported Python script should be saved. export_filename: The name of the exported Python script file. + + Returns: + Path: The path to the exported Python script file. """ nb_export(notebook_path, output_path) return Path(output_path).joinpath(export_filename).resolve() - def __comment_flow_execution(self) -> None: - """In the python script search for ".run()" and comment it.""" - with open(self.script_path, "r") as f: - data = f.readlines() - for idx, line in enumerate(data): - if ".run()" in line: - data[idx] = f"# {line}" - with open(self.script_path, "w") as f: - f.writelines(data) - - def __change_runtime(self) -> None: - """Change the LocalRuntime backend from ray to single_process.""" - with open(self.script_path, "r") as f: - data = f.read() - - if "backend='ray'" in data or 'backend="ray"' in data: - data = data.replace("backend='ray'", "backend='single_process'").replace( - 'backend="ray"', 'backend="single_process"' - ) + def __import_exported_script(self) -> None: + """ + Imports the generated python script using the importlib module + """ + try: + sys.path.append(str(self.script_path.parent)) + self.exported_script_module = import_module(self.script_name) + self.available_modules_in_exported_script = dir(self.exported_script_module) - with 
open(self.script_path, "w") as f: - f.write(data) + except ImportError as e: + logger.error(f"Failed to import script {self.script_name}: {e}") + raise def __get_class_arguments(self, class_name) -> list: """Given the class name returns expected class arguments. Args: - class_name (str): Name of the class + class_name (str): The name of the class. + + Returns: + list: A list of expected class arguments. """ # Import python script if not already if not hasattr(self, "exported_script_module"): @@ -192,9 +143,13 @@ def __get_class_name_and_sourcecode_from_parent_class( ) -> Optional[Tuple[Optional[str], Optional[str]]]: """Provided the parent_class name returns derived class source code and name. - Args: - parent_class: FLSpec instance + parent_class: FLSpec instance. + + Returns: + Optional[Tuple[Optional[str], Optional[str]]]: + The source code of the derived class (str). + The name of the derived class (str). """ # Import python script if not already if not hasattr(self, "exported_script_module"): @@ -209,11 +164,13 @@ def __get_class_name_and_sourcecode_from_parent_class( return None, None def __extract_class_initializing_args(self, class_name) -> Dict[str, Any]: - """Provided name of the class returns expected arguments and its - values in the form of a dictionary. - + """Provided name of the class returns expected arguments and it's + values in form of dictionary. Args: - class_name (str): Name of the class + class_name (str): The name of the class. + + Returns: + Dict[str, Any]: A dictionary containing the expected arguments and their values. 
""" instantiation_args = {"args": {}, "kwargs": {}} @@ -262,83 +219,15 @@ def _clean_value(self, value: str) -> str: value = value.lstrip("[").rstrip("]") return value - def __import_exported_script(self) -> None: - """ - Imports generated python script with help of importlib - """ - - sys.path.append(str(self.script_path.parent)) - self.exported_script_module = importlib.import_module(self.script_name) - self.available_modules_in_exported_script = dir(self.exported_script_module) - - def __read_yaml(self, path) -> None: - with open(path, "r") as y: - return yaml.safe_load(y) - - def __write_yaml(self, path, data) -> None: - with open(path, "w") as y: - yaml.safe_dump(data, y) - - @classmethod - def export_federated( - cls, notebook_path: str, output_workspace: str, director_fqdn: str, tls: bool = False - ) -> Tuple[str, str]: - """Exports workspace for FederatedRuntime. - - Args: - notebook_path (str): Path to the Jupyter notebook. - output_workspace (str): Path for the generated workspace directory. - director_fqdn (str): Fully qualified domain name of the director node. - tls (bool, optional): Whether to use TLS for the connection. - - Returns: - Tuple[str, str]: A tuple containing: - (archive_path, flow_class_name). - """ - instance = cls(notebook_path, output_workspace) - instance.generate_requirements() - instance.generate_plan_yaml(director_fqdn, tls) - instance._clean_generated_workspace() - print_tree(output_workspace, level=2) - return instance.generate_experiment_archive() - - @classmethod - def export(cls, notebook_path: str, output_workspace: str) -> None: - """Exports workspace to output_workspace. - - Args: - notebook_path (str): Path to the Jupyter notebook. - output_workspace (str): Path for the generated workspace directory. 
- """ - instance = cls(notebook_path, output_workspace) - instance.generate_requirements() - instance.generate_plan_yaml() - instance.generate_data_yaml() - print_tree(output_workspace, level=2) - - def generate_experiment_archive(self) -> Tuple[str, str]: - """ - Create archive of the generated workspace + def get_requirements(self) -> Tuple[List[str], List[int], List[str]]: + """Extract pip libraries from the script Returns: - Tuple[str, str]: A tuple containing: - (generated_workspace_path, archive_path, flow_class_name). + tuple: A tuple containing: + requirements (list of str): List of pip libraries found in the script. + line_nos (list of int): List of line numbers where "pip install" commands are found. + data (list of str): The entire script data as a list of lines. """ - parent_directory = self.output_workspace_path.parent - archive_path = parent_directory / "experiment" - - # Create a ZIP archive of the generated_workspace directory - arch_path = shutil.make_archive(str(archive_path), "zip", str(self.output_workspace_path)) - - print(f"Archive created at {archive_path}.zip") - - return arch_path, self.flow_class_name - - # Have to do generate_requirements before anything else - # because these !pip commands needs to be removed from python script - def generate_requirements(self) -> None: - """Finds pip libraries mentioned in exported python script and append - in workspace/requirements.txt.""" data = None with open(self.script_path, "r") as f: requirements = [] @@ -353,133 +242,135 @@ def generate_requirements(self) -> None: if not line.startswith("#") and "-r" not in line and "openfl.git" not in line: requirements.append(f"{line.split(' ')[-1].strip()}\n") - requirements_filepath = str( - self.created_workspace_path.joinpath("requirements.txt").resolve() - ) - - # Write libraries found in requirements.txt - with open(requirements_filepath, "a") as f: - f.writelines(requirements) + return requirements, line_nos, data - # Delete pip requirements from 
python script - # if not we won't be able to import python script. + def remove_lines(self, data: List[str], line_nos: List[int]) -> None: + """Removes pip install lines from the script + Args: + data (List[str]): The entire script data as a list of lines. + line_nos (List[int]): List of line numbers where "pip install" commands are found. + """ with open(self.script_path, "w") as f: for i, line in enumerate(data): if i not in line_nos: f.write(line) - def _clean_generated_workspace(self) -> None: + def get_flow_class_details(self, parent_class) -> Dict[str, Any]: """ - Remove cols.yaml and data.yaml from the generated workspace - as these are not needed in FederatedRuntime (Director based workflow) + Retrieves details of a flow class that inherits from the given parent clas + Args: + parent_class: The parent class (FLSpec instance). + Returns: + Dict[str, Any]: A dictionary containing: + flow_class_name (str): The name of the flow class. + expected_args (List[str]): The expected arguments for the flow class. + init_args (Dict[str, Any]): The initialization arguments for the flow class. 
""" - cols_file = self.output_workspace_path.joinpath("plan", "cols.yaml") - data_file = self.output_workspace_path.joinpath("plan", "data.yaml") + _, flow_class_name = self.__get_class_name_and_sourcecode_from_parent_class(parent_class) + if not flow_class_name: + raise ValueError("No flow class found that inherits from FLSpec") - if cols_file.exists(): - cols_file.unlink() - if data_file.exists(): - data_file.unlink() + # Get expected arguments + expected_arguments = self.__get_class_arguments(flow_class_name) - def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> None: - """ - Generates plan.yaml + # get initialization arguments + init_args = self.__extract_class_initializing_args(flow_class_name) + + return { + "flow_class_name": flow_class_name, + "expected_args": expected_arguments, + "init_args": init_args, + } + def analyze_flow_configuration(self, flow_details: Dict[str, Any]) -> Dict[str, Any]: + """Analyze flow configuration from flow details. Args: - director_fqdn (str): Fully qualified domain name of the director node. - tls (bool, optional): Whether to use TLS for the connection. + flow_details (Dict[str, Any]): Dictionary containing flow class details. 
+ + Returns: + Dict[str, Any]: Dictionary containing the plan configuration """ - flspec = importlib.import_module("openfl.experimental.workflow.interface").FLSpec - # Get flow classname - _, self.flow_class_name = self.__get_class_name_and_sourcecode_from_parent_class(flspec) - # Get expected arguments of flow class - self.flow_class_expected_arguments = self.__get_class_arguments(self.flow_class_name) - # Get provided arguments to flow class - self.arguments_passed_to_initialize = self.__extract_class_initializing_args( - self.flow_class_name - ) - - plan = self.created_workspace_path.joinpath("plan", "plan.yaml").resolve() - data = self.__read_yaml(plan) - if data is None: - data = {} - data["federated_flow"] = {"settings": {}, "template": ""} - - data["federated_flow"]["template"] = f"src.{self.script_name}.{self.flow_class_name}" - - def update_dictionary(args: dict, data: dict, dtype: str = "args"): + flow_config = { + "federated_flow": { + "settings": {}, + "template": f"src.{self.script_name}.{flow_details['flow_class_name']}", + } + } + + def update_dictionary(args: dict, dtype: str = "args") -> None: + """Update plan configuration with argument values. 
+ + Args: + args: Dictionary of arguments to process + dtype: Type of arguments ('args' or 'kwargs') + """ for idx, (k, v) in enumerate(args.items()): if dtype == "args": v = getattr(self.exported_script_module, str(k), None) - if v is not None and type(v) not in (int, str, bool): + if v is not None and not isinstance(v, (int, str, bool)): v = f"src.{self.script_name}.{k}" - k = self.flow_class_expected_arguments[idx] + k = flow_details["expected_args"][idx] elif dtype == "kwargs": - if v is not None and type(v) not in (int, str, bool): - v = f"src.{self.script_name}.{k}" - data["federated_flow"]["settings"].update({k: v}) - - # Find positional arguments of flow class and it's values - pos_args = self.arguments_passed_to_initialize["args"] - update_dictionary(pos_args, data, dtype="args") - # Find kwargs of flow class and it's values - kw_args = self.arguments_passed_to_initialize["kwargs"] - update_dictionary(kw_args, data, dtype="kwargs") - - # Updating the aggregator address with director's hostname and tls settings in plan.yaml - if director_fqdn: - network_settings = Plan.parse(plan).config["network"] - data["network"] = network_settings - data["network"]["settings"]["agg_addr"] = director_fqdn - data["network"]["settings"]["tls"] = tls - - self.__write_yaml(plan, data) - - def generate_data_yaml(self) -> None: - """Generates data.yaml.""" - # Import python script if not already + if v is not None and not isinstance(v, (int, str, bool)): + v = f"src.{self.script_name}.{v}" + flow_config["federated_flow"]["settings"].update({k: v}) + + # Process arguments + pos_args = flow_details["init_args"].get("args", {}) + update_dictionary(pos_args, "args") + kw_args = flow_details["init_args"].get("kwargs", {}) + update_dictionary(kw_args, "kwargs") + + return flow_config + + def get_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: + """Get federated flow class and runtime information. 
+ Args: + flow_class_name (str): The name of the federated flow class to retrieve. + + Returns: + tuple: A tuple containing the runtime instance and the flow class name. + """ if not hasattr(self, "exported_script_module"): self.__import_exported_script() - self._find_flow_class_name_if_needed() - # Import flow class - federated_flow_class = getattr(self.exported_script_module, self.flow_class_name) - + federated_flow_class = getattr(self.exported_script_module, flow_class_name) flow_name, runtime = self._find_runtime_instance(federated_flow_class) - data_yaml = self.created_workspace_path.joinpath("plan", "data.yaml").resolve() - data = self._read_or_initialize_yaml(data_yaml) - runtime_name = "runtime_local" - runtime_created = self._process_aggregator(runtime, data, flow_name, runtime_name) - self._process_collaborators(runtime, data, flow_name, runtime_created, runtime_name) - self.__write_yaml(data_yaml, data) - - def _find_flow_class_name_if_needed(self): - """Find the flow class name if not already found.""" - if not hasattr(self, "flow_class_name"): - flspec = importlib.import_module("openfl.experimental.workflow.interface").FLSpec - _, self.flow_class_name = self.__get_class_name_and_sourcecode_from_parent_class(flspec) - - def _find_runtime_instance(self, federated_flow_class): - """Find the runtime instance.""" + return runtime, flow_name + + def _find_runtime_instance(self, federated_flow_class) -> Tuple[str, object]: + """Find runtime instance + Args: + federated_flow_class: The class object of the federated flow. + + Returns: + tuple: A tuple containing the name of the flow instance and the runtime instance. 
+ """ for t in self.available_modules_in_exported_script: + tempstring = t t = getattr(self.exported_script_module, t) if isinstance(t, federated_flow_class): + flow_name = tempstring if not hasattr(t, "_runtime"): raise AttributeError("Unable to locate LocalRuntime instantiation") runtime = t._runtime if not hasattr(runtime, "collaborators"): raise AttributeError("LocalRuntime instance does not have collaborators") - return runtime + return flow_name, runtime raise AttributeError("Runtime instance not found") - def _read_or_initialize_yaml(self, data_yaml): - """Read or initialize the YAML data.""" - data = self.__read_yaml(data_yaml) - return data if data is not None else {} + def process_aggregator(self, runtime, data, flow_name, runtime_name) -> bool: + """Process the aggregator details. + Args: + runtime (Any): The runtime instance containing the aggregator. + data (Dict[str, Any]): The data dictionary to be updated with aggregator details. + flow_name (str): The name of the flow. + runtime_name (str): The name of the runtime. - def _process_aggregator(self, runtime, data, flow_name, runtime_name): - """Process the aggregator details.""" + Returns: + bool: A boolean indicating whether the runtime was created. + """ aggregator = runtime._aggregator runtime_created = False private_attrs_callable = aggregator.private_attributes_callable @@ -516,13 +407,26 @@ def _process_aggregator(self, runtime, data, flow_name, runtime_name): } return runtime_created - def _process_collaborators(self, runtime, data, flow_name, runtime_created, runtime_name): - """Process the collaborators.""" + def process_collaborators( + self, runtime, data, flow_name, runtime_created, runtime_name + ) -> Dict[str, Any]: + """Process the collaborators. + Args: + runtime (Any): The runtime instance containing the collaborators. + data (Dict[str, Any]): The data dictionary to be updated with collaborator details. + flow_name (str): The name of the flow. 
+ runtime_created (bool): Flag indicating if the runtime has been created. + runtime_name (str): The name of the runtime. + + Returns: + Dict[str, Any]: The updated data dictionary with collaborator details. + """ collaborators = runtime._LocalRuntime__collaborators arguments_passed_to_initialize = self.__extract_class_initializing_args("Collaborator")[ "kwargs" ] runtime_collab_created = False + for collab in collaborators.values(): collab_name = collab.get_name() callable_func = collab.private_attributes_callable @@ -559,3 +463,5 @@ def _process_collaborators(self, runtime, data, flow_name, runtime_created, runt data[collab_name] = { "private_attributes": f"src.{self.script_name}.{collab_name}_private_attributes" } + + return data diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py new file mode 100644 index 0000000000..d2480ad88c --- /dev/null +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -0,0 +1,253 @@ +# Copyright 2020-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Notebook Tools module.""" + +import shutil +from importlib import import_module +from logging import getLogger +from pathlib import Path +from shutil import copytree +from typing import Tuple + +import yaml + +from openfl.experimental.workflow.federated.plan import Plan +from openfl.experimental.workflow.interface.cli.cli_helper import print_tree +from openfl.experimental.workflow.notebooktools.code_analyzer import CodeAnalyzer + +logger = getLogger(__name__) + + +class NotebookTools: + """The class is responsible for converting workflow API + into an OpenFL workspace + + Attributes: + notebook_path: Absolute path of jupyter notebook. + template_workspace_path: Path to template workspace provided with + OpenFL. + output_workspace_path: Output directory for new generated workspace + (default="/tmp"). 
+ """ + + def __init__(self, notebook_path: str, output_workspace: str) -> None: + """Initialize a NotebookTools object. + Args: + notebook_path (str): The path to the Jupyter notebook that needs to be converted. + output_workspace (str): The directory where the converted workspace will be saved + workspace + """ + self.notebook_path = Path(notebook_path).resolve() + # Check if the Jupyter notebook exists + if not self.notebook_path.exists() or not self.notebook_path.is_file(): + raise FileNotFoundError(f"The Jupyter notebook at {notebook_path} does not exist.") + + self.output_workspace_path = Path(output_workspace).resolve() + # Regenerate the workspace if it already exists + if self.output_workspace_path.exists(): + shutil.rmtree(self.output_workspace_path) + self.output_workspace_path.parent.mkdir(parents=True, exist_ok=True) + + self.template_workspace_path = ( + Path(f"{__file__}") + .parent.parent.parent.parent.parent.joinpath( + "openfl-workspace", + "experimental", + "workflow", + "AggregatorBasedWorkflow", + "template_workspace", + ) + .resolve(strict=True) + ) + + # Copy template workspace to output directory + copytree(self.template_workspace_path, self.output_workspace_path) + + logger.info(f"Copied template workspace to {self.output_workspace_path}") + + # Initialize CodeAnalyzer object + self.code_analyzer = CodeAnalyzer() + # Initialize the script with in the CodeAnalyzer + self.code_analyzer._initialize_script(self.notebook_path, self.output_workspace_path) + + @classmethod + def export_federated( + cls, notebook_path: str, output_workspace: str, director_fqdn: str, tls: bool = False + ) -> Tuple[str, str]: + """Exports workspace for FederatedRuntime. + + Args: + notebook_path (str): Path to the Jupyter notebook. + output_workspace (str): Path for the generated workspace directory. + director_fqdn (str): Fully qualified domain name of the director node. + tls (bool, optional): Whether to use TLS for the connection. 
+ + Returns: + Tuple[str, str]: A tuple containing: + (archive_path, flow_class_name). + """ + instance = cls(notebook_path, output_workspace) + instance.generate_requirements() + instance.generate_plan_yaml(director_fqdn, tls) + instance._clean_generated_workspace() + print_tree(output_workspace, level=2) + + @classmethod + def export(cls, notebook_path: str, output_workspace: str) -> None: + """Exports workspace to output_workspace. + Args: + notebook_path (str): Path to the Jupyter notebook. + output_workspace (str): Path for the generated workspace directory. + """ + instance = cls(notebook_path, output_workspace) + instance.generate_requirements() + instance.generate_plan_yaml() + instance.generate_data_yaml() + print_tree(output_workspace, level=2) + + def generate_requirements(self) -> None: + """Extracts pip libraries mentioned in exported python script and append + in workspace/requirements.txt. + """ + requirements, line_numbers, data = self.code_analyzer.get_requirements() + + requirements_filepath = str( + self.output_workspace_path.joinpath("requirements.txt").resolve() + ) + + # Write libraries found in requirements.txt + with open(requirements_filepath, "a") as f: + f.writelines(requirements) + + # Delete pip requirements from the python script to ensure it can be imported + self.code_analyzer.remove_lines(data, line_numbers) + + def _clean_generated_workspace(self) -> None: + """ + Remove cols.yaml and data.yaml from the generated workspace + as these are not needed in FederatedRuntime (Director based workflow) + + """ + cols_file = self.output_workspace_path.joinpath("plan", "cols.yaml") + data_file = self.output_workspace_path.joinpath("plan", "data.yaml") + + if cols_file.exists(): + cols_file.unlink() + if data_file.exists(): + data_file.unlink() + + def __read_yaml(self, path) -> dict: + """Reads a YAML file and returns its contents. + Args: + path (str): The path to the YAML file. + + Returns: + dict: The contents of the YAML file. 
+ """ + with open(path, "r") as y: + return yaml.safe_load(y) + + def __write_yaml(self, path, data) -> None: + """Writes data to a YAML file. + Args: + path (str): The path to the YAML file. + data (dict): The data to write to the YAML file. + """ + with open(path, "w") as y: + yaml.safe_dump(data, y) + + def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> None: + """Generate the plan.yaml file containing the federated learning flow configuration + Args: + director_fqdn (str): Fully qualified domain name of the director node. + tls (bool, optional): Whether to use TLS for the connection. + """ + flspec = import_module("openfl.experimental.workflow.interface").FLSpec + # Get the flow_class details + flow_details = self.code_analyzer.get_flow_class_details(flspec) + # Analyze and generate plan configuration + flow_config = self.code_analyzer.analyze_flow_configuration(flow_details) + + # Determine the path for the plan.yaml file + plan = self.output_workspace_path.joinpath("plan", "plan.yaml").resolve() + + ## Read or initialize the YAML data + data = self._read_or_initialize_plan_yaml(plan) + + # Update the plan_configuration with the analyzed flow configuration + data["federated_flow"].update(flow_config["federated_flow"]) + + # Updating the aggregator address with director's hostname and tls settings in plan.yaml + if director_fqdn: + network_settings = Plan.parse(plan).config["network"] + data["network"] = network_settings + data["network"]["settings"]["agg_addr"] = director_fqdn + data["network"]["settings"]["tls"] = tls + + # Write the updated plan configuraiton to the plan.yaml file + self.__write_yaml(plan, data) + + def generate_data_yaml(self) -> None: + """Generate data.yaml with runtime configuration""" + # Ensure flow_class is available + flow_class_name = self._ensure_flow_class() + + # Get runtime information using CodeAnalyzer + runtime, flow_name = self.code_analyzer.get_runtime_info(flow_class_name) + + # Determine the 
path for the data.yaml + data_yaml = self.output_workspace_path.joinpath("plan", "data.yaml").resolve() + + # Read or initialize the YAML data + data = self._read_or_initialize_data_yaml(data_yaml) + + # Initiaize runtime name + runtime_name = "local_runtime" + + # Process aggregator information using CodeAnalyzer + runtime_created = self.code_analyzer.process_aggregator( + runtime, data, flow_name, runtime_name + ) + + # Process collaborator information using CodeAnalyzer + data = self.code_analyzer.process_collaborators( + runtime, data, flow_name, runtime_created, runtime_name + ) + + # Write updated data configuration to the data.yaml file + self.__write_yaml(data_yaml, data) + + def _ensure_flow_class(self) -> str: + """Ensure flow class is available and returns its name""" + if not hasattr(self, "flow_class_name"): + flspsec = import_module("openfl.experimental.workflow.interface").FLSpec + flow_details = self.code_analyzer.get_flow_class_details(flspsec) + self.flow_class_name = flow_details["flow_class_name"] + + return self.flow_class_name + + def _read_or_initialize_plan_yaml(self, plan_yaml) -> dict: + """Read or initialize the plan YAML data. + Args: + plan_yaml (Path): The path to the plan.yaml file. + + Returns: + dict: The data dictionary from plan.yaml. + """ + data = self.__read_yaml(plan_yaml) + if data is None: + data = {} + data["federated_flow"] = {"settings": {}, "template": ""} + return data + + def _read_or_initialize_data_yaml(self, data_yaml) -> dict: + """Read or initialize the YAML data. + Args: + data_yaml (Path): The path to the data.yaml file. 
+ + Returns: + dict: The data dictionary from data.yaml + """ + data = self.__read_yaml(data_yaml) + return data if data is not None else {} diff --git a/openfl/experimental/workflow/runtime/federated_runtime.py b/openfl/experimental/workflow/runtime/federated_runtime.py index 861c27e059..a4bc091ed5 100644 --- a/openfl/experimental/workflow/runtime/federated_runtime.py +++ b/openfl/experimental/workflow/runtime/federated_runtime.py @@ -16,9 +16,9 @@ import dill from tabulate import tabulate +from openfl.experimental.workflow.notebooktools import NotebookTools from openfl.experimental.workflow.runtime.runtime import Runtime from openfl.experimental.workflow.transport.grpc.director_client import DirectorClient -from openfl.experimental.workflow.workspace_export import WorkspaceExport logger = logging.getLogger(__name__) @@ -140,13 +140,13 @@ def _create_director_client(self) -> DirectorClient: def prepare_workspace_archive(self) -> Tuple[Path, str]: """ - Prepare workspace archive using WorkspaceExport. + Prepare workspace archive using NotebookTools. Returns: Tuple[Path, str]: A tuple containing the path of the created archive and the experiment name. 
""" - archive_path, exp_name = WorkspaceExport.export_federated( + archive_path, exp_name = NotebookTools.export_federated( notebook_path=self.notebook_path, output_workspace="./generated_workspace", director_fqdn=self.director["director_node_fqdn"], diff --git a/openfl/experimental/workflow/workspace_export/__init__.py b/openfl/experimental/workflow/workspace_export/__init__.py deleted file mode 100644 index f947a0fc36..0000000000 --- a/openfl/experimental/workflow/workspace_export/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright 2020-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openfl.experimental.workflow.workspace_export.export import WorkspaceExport From 259dcfc1160ac9604faf97a475e15703f37d8ef0 Mon Sep 17 00:00:00 2001 From: refai06 Date: Tue, 11 Feb 2025 15:57:12 +0530 Subject: [PATCH 2/7] Incorporated comments Signed-off-by: refai06 --- .../workflow/notebooktools/__init__.py | 2 +- .../workflow/notebooktools/code_analyzer.py | 85 +++++++----- .../workflow/notebooktools/notebook_tools.py | 123 +++++++++--------- 3 files changed, 118 insertions(+), 92 deletions(-) diff --git a/openfl/experimental/workflow/notebooktools/__init__.py b/openfl/experimental/workflow/notebooktools/__init__.py index 41d1e84ced..1f29ec701d 100644 --- a/openfl/experimental/workflow/notebooktools/__init__.py +++ b/openfl/experimental/workflow/notebooktools/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2020-2024 Intel Corporation +# Copyright 2020-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/openfl/experimental/workflow/notebooktools/code_analyzer.py b/openfl/experimental/workflow/notebooktools/code_analyzer.py index 5c559ecfd3..1c0a086c45 100644 --- a/openfl/experimental/workflow/notebooktools/code_analyzer.py +++ b/openfl/experimental/workflow/notebooktools/code_analyzer.py @@ -1,4 +1,4 @@ -# Copyright 2020-2024 Intel Corporation +# Copyright 2020-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import ast @@ -17,32 
+17,30 @@ class CodeAnalyzer: - """Code transforamtion and analysis functionality for NotebookTools + """Code analysis and transformation functionality for NotebookTools Attributes: - script_path: Absoluet path to python script. + script_path: Absolute path to python script. script_name: Name of the python script. """ - def __init__(self) -> None: - """Initialize CodeTransformer""" + def __init__(self, notebook_path: Path, output_path: Path) -> None: + """Initialize CodeAnalzer and process the script from notebook - self.script_path = None - self.script_name = None - - def _initialize_script(self, notebook_path: Path, output_path: Path) -> None: - """Initialize and process the script from notebook Args: - notebook_path (str): Path to Jupyter notebook. - output_workspace (str): Path to output_workspace to be - generated. + notebook_path (Path): The path to the Jupyter notebook that needs to be converted. + output_path (Path): The directory where the converted Python script will be saved. """ + logger.info("Converting jupter notebook to python script...") + + # Extract the export filename from the notebook export_filename = self.__get_exp_name(notebook_path) if export_filename is None: raise NameError( "Please include `#| default_exp ` in " "the first cell of the notebook." ) + # Convert the notebook to a Python script and set the script path self.script_path = Path( self.__convert_to_python( notebook_path, @@ -50,8 +48,16 @@ def _initialize_script(self, notebook_path: Path, output_path: Path) -> None: f"{export_filename}.py", ) ).resolve() + # Generated python script name self.script_name = self.script_path.name.split(".")[0].strip() + # Comment out flow.run() to prevent the flow from starting execution + # automatically when the script is imported. 
+ self.__comment_flow_execution() + + # Change the runtime backend from 'ray' to 'single_process' + self.__change_runtime() + def __get_exp_name(self, notebook_path: Path) -> None: """Fetch the experiment name from the Jupyter notebook. Args: @@ -85,6 +91,29 @@ def __convert_to_python(self, notebook_path: Path, output_path: Path, export_fil return Path(output_path).joinpath(export_filename).resolve() + def __comment_flow_execution(self) -> None: + """Comment out lines containing '.run()' in the specified Python script""" + with open(self.script_path, "r") as f: + data = f.readlines() + for idx, line in enumerate(data): + if ".run()" in line: + data[idx] = f"# {line}" + with open(self.script_path, "w") as f: + f.writelines(data) + + def __change_runtime(self) -> None: + """Change the LocalRuntime backend from ray to single_process.""" + with open(self.script_path, "r") as f: + data = f.read() + + if "backend='ray'" in data or 'backend="ray"' in data: + data = data.replace("backend='ray'", "backend='single_process'").replace( + 'backend="ray"', 'backend="single_process"' + ) + + with open(self.script_path, "w") as f: + f.write(data) + def __import_exported_script(self) -> None: """ Imports the generated python script using the importlib module @@ -283,8 +312,8 @@ def get_flow_class_details(self, parent_class) -> Dict[str, Any]: "init_args": init_args, } - def analyze_flow_configuration(self, flow_details: Dict[str, Any]) -> Dict[str, Any]: - """Analyze flow configuration from flow details. + def fetch_flow_configuration(self, flow_details: Dict[str, Any]) -> Dict[str, Any]: + """Get flow configuration from flow details. Args: flow_details (Dict[str, Any]): Dictionary containing flow class details. 
@@ -324,7 +353,7 @@ def update_dictionary(args: dict, dtype: str = "args") -> None: return flow_config - def get_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: + def get_flow_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: """Get federated flow class and runtime information. Args: flow_class_name (str): The name of the federated flow class to retrieve. @@ -336,10 +365,10 @@ def get_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: self.__import_exported_script() federated_flow_class = getattr(self.exported_script_module, flow_class_name) - flow_name, runtime = self._find_runtime_instance(federated_flow_class) - return runtime, flow_name + flow_instance_name, runtime = self._find_flow_instance_runtime(federated_flow_class) + return runtime, flow_instance_name - def _find_runtime_instance(self, federated_flow_class) -> Tuple[str, object]: + def _find_flow_instance_runtime(self, federated_flow_class) -> Tuple[str, object]: """Find runtime instance Args: federated_flow_class: The class object of the federated flow. @@ -351,21 +380,21 @@ def _find_runtime_instance(self, federated_flow_class) -> Tuple[str, object]: tempstring = t t = getattr(self.exported_script_module, t) if isinstance(t, federated_flow_class): - flow_name = tempstring + flow_instance_name = tempstring if not hasattr(t, "_runtime"): raise AttributeError("Unable to locate LocalRuntime instantiation") runtime = t._runtime if not hasattr(runtime, "collaborators"): raise AttributeError("LocalRuntime instance does not have collaborators") - return flow_name, runtime + return flow_instance_name, runtime raise AttributeError("Runtime instance not found") - def process_aggregator(self, runtime, data, flow_name, runtime_name) -> bool: + def process_aggregator(self, runtime, data, flow_instance_name, runtime_name) -> bool: """Process the aggregator details. Args: runtime (Any): The runtime instance containing the aggregator. 
data (Dict[str, Any]): The data dictionary to be updated with aggregator details. - flow_name (str): The name of the flow. + flow_instance_name (str): The name of the flow instance. runtime_name (str): The name of the runtime. Returns: @@ -397,7 +426,7 @@ def process_aggregator(self, runtime, data, flow_name, runtime_name) -> bool: elif aggregator_private_attributes: runtime_created = True with open(self.script_path, "a") as f: - f.write(f"\n{runtime_name} = {flow_name}._runtime\n") + f.write(f"\n{runtime_name} = {flow_instance_name}._runtime\n") f.write( f"\naggregator_private_attributes = " f"{runtime_name}._aggregator.private_attributes\n" @@ -408,13 +437,13 @@ def process_aggregator(self, runtime, data, flow_name, runtime_name) -> bool: return runtime_created def process_collaborators( - self, runtime, data, flow_name, runtime_created, runtime_name + self, runtime, data, flow_instance_name, runtime_created, runtime_name ) -> Dict[str, Any]: """Process the collaborators. Args: runtime (Any): The runtime instance containing the collaborators. data (Dict[str, Any]): The data dictionary to be updated with collaborator details. - flow_name (str): The name of the flow. + flow_instance_name (str): The name of the flow instance. runtime_created (bool): Flag indicating if the runtime has been created. runtime_name (str): The name of the runtime. 
@@ -449,7 +478,7 @@ def process_collaborators( elif private_attributes: with open(self.script_path, "a") as f: if not runtime_created: - f.write(f"\n{runtime_name} = {flow_name}._runtime\n") + f.write(f"\n{runtime_name} = {flow_instance_name}._runtime\n") runtime_created = True if not runtime_collab_created: f.write( @@ -463,5 +492,3 @@ def process_collaborators( data[collab_name] = { "private_attributes": f"src.{self.script_name}.{collab_name}_private_attributes" } - - return data diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py index d2480ad88c..f06240b8cd 100644 --- a/openfl/experimental/workflow/notebooktools/notebook_tools.py +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -1,4 +1,4 @@ -# Copyright 2020-2024 Intel Corporation +# Copyright 2020-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Notebook Tools module.""" @@ -10,8 +10,6 @@ from shutil import copytree from typing import Tuple -import yaml - from openfl.experimental.workflow.federated.plan import Plan from openfl.experimental.workflow.interface.cli.cli_helper import print_tree from openfl.experimental.workflow.notebooktools.code_analyzer import CodeAnalyzer @@ -20,8 +18,8 @@ class NotebookTools: - """The class is responsible for converting workflow API - into an OpenFL workspace + """Class to convert LocalRuntime Jupyter notebook based on Workflow API into a + workspace that could be deployed on distributed infrastructure Attributes: notebook_path: Absolute path of jupyter notebook. 
@@ -67,9 +65,7 @@ def __init__(self, notebook_path: str, output_workspace: str) -> None: logger.info(f"Copied template workspace to {self.output_workspace_path}") # Initialize CodeAnalyzer object - self.code_analyzer = CodeAnalyzer() - # Initialize the script with in the CodeAnalyzer - self.code_analyzer._initialize_script(self.notebook_path, self.output_workspace_path) + self.code_analyzer = CodeAnalyzer(self.notebook_path, self.output_workspace_path) @classmethod def export_federated( @@ -92,6 +88,7 @@ def export_federated( instance.generate_plan_yaml(director_fqdn, tls) instance._clean_generated_workspace() print_tree(output_workspace, level=2) + return instance.generate_experiment_archive() @classmethod def export(cls, notebook_path: str, output_workspace: str) -> None: @@ -106,9 +103,27 @@ def export(cls, notebook_path: str, output_workspace: str) -> None: instance.generate_data_yaml() print_tree(output_workspace, level=2) + def generate_experiment_archive(self) -> Tuple[str, str]: + """ + Create archive of the generated workspace + + Returns: + Tuple[str, str]: A tuple containing: + (archive_path, flow_class_name). + """ + parent_directory = self.output_workspace_path.parent + archive_path = parent_directory / "experiment" + + # Create a ZIP archive of the generated_workspace directory + arch_path = shutil.make_archive(str(archive_path), "zip", str(self.output_workspace_path)) + + print(f"Archive created at {archive_path}.zip") + + return arch_path, self.flow_class_name + def generate_requirements(self) -> None: - """Extracts pip libraries mentioned in exported python script and append - in workspace/requirements.txt. 
+ """Extracts pip libraries from exported python script + and append in workspace/requirements.txt """ requirements, line_numbers, data = self.code_analyzer.get_requirements() @@ -137,43 +152,27 @@ def _clean_generated_workspace(self) -> None: if data_file.exists(): data_file.unlink() - def __read_yaml(self, path) -> dict: - """Reads a YAML file and returns its contents. - Args: - path (str): The path to the YAML file. - - Returns: - dict: The contents of the YAML file. - """ - with open(path, "r") as y: - return yaml.safe_load(y) - - def __write_yaml(self, path, data) -> None: - """Writes data to a YAML file. - Args: - path (str): The path to the YAML file. - data (dict): The data to write to the YAML file. - """ - with open(path, "w") as y: - yaml.safe_dump(data, y) - def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> None: - """Generate the plan.yaml file containing the federated learning flow configuration + """Generate the plan.yaml Args: director_fqdn (str): Fully qualified domain name of the director node. tls (bool, optional): Whether to use TLS for the connection. 
""" - flspec = import_module("openfl.experimental.workflow.interface").FLSpec + # Get the flow_class details - flow_details = self.code_analyzer.get_flow_class_details(flspec) - # Analyze and generate plan configuration - flow_config = self.code_analyzer.analyze_flow_configuration(flow_details) + flow_details = self._extract_flow_details() + + # Get flow_class_name + self.flow_class_name = flow_details["flow_class_name"] + + # Get flow configuration + flow_config = self.code_analyzer.fetch_flow_configuration(flow_details) # Determine the path for the plan.yaml file plan = self.output_workspace_path.joinpath("plan", "plan.yaml").resolve() - ## Read or initialize the YAML data - data = self._read_or_initialize_plan_yaml(plan) + # Initialize the YAML data + data = self._initialize_plan_yaml(plan) # Update the plan_configuration with the analyzed flow configuration data["federated_flow"].update(flow_config["federated_flow"]) @@ -186,68 +185,68 @@ def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> No data["network"]["settings"]["tls"] = tls # Write the updated plan configuraiton to the plan.yaml file - self.__write_yaml(plan, data) + Plan.dump(plan, data) def generate_data_yaml(self) -> None: - """Generate data.yaml with runtime configuration""" - # Ensure flow_class is available - flow_class_name = self._ensure_flow_class() + """Generate data.yaml""" + + # Get flow class_name + if not hasattr(self, "flow_class_name"): + flow_details = self._extract_flow_details() + self.flow_class_name = flow_details["flow_class_name"] # Get runtime information using CodeAnalyzer - runtime, flow_name = self.code_analyzer.get_runtime_info(flow_class_name) + runtime, flow_instance_name = self.code_analyzer.get_flow_runtime_info(self.flow_class_name) # Determine the path for the data.yaml data_yaml = self.output_workspace_path.joinpath("plan", "data.yaml").resolve() - # Read or initialize the YAML data - data = self._read_or_initialize_data_yaml(data_yaml) + # 
Initialize the YAML data + data = self._initialize_data_yaml(data_yaml) # Initiaize runtime name runtime_name = "local_runtime" # Process aggregator information using CodeAnalyzer runtime_created = self.code_analyzer.process_aggregator( - runtime, data, flow_name, runtime_name + runtime, data, flow_instance_name, runtime_name ) # Process collaborator information using CodeAnalyzer - data = self.code_analyzer.process_collaborators( - runtime, data, flow_name, runtime_created, runtime_name + self.code_analyzer.process_collaborators( + runtime, data, flow_instance_name, runtime_created, runtime_name ) # Write updated data configuration to the data.yaml file - self.__write_yaml(data_yaml, data) - - def _ensure_flow_class(self) -> str: - """Ensure flow class is available and returns its name""" - if not hasattr(self, "flow_class_name"): - flspsec = import_module("openfl.experimental.workflow.interface").FLSpec - flow_details = self.code_analyzer.get_flow_class_details(flspsec) - self.flow_class_name = flow_details["flow_class_name"] + Plan.dump(data_yaml, data) - return self.flow_class_name + def _extract_flow_details(self) -> str: + """Extract the flow class details""" + flspsec = import_module("openfl.experimental.workflow.interface").FLSpec + flow_details = self.code_analyzer.get_flow_class_details(flspsec) + return flow_details - def _read_or_initialize_plan_yaml(self, plan_yaml) -> dict: - """Read or initialize the plan YAML data. + def _initialize_plan_yaml(self, plan_yaml) -> dict: + """Load or initialize the plan YAML data. Args: plan_yaml (Path): The path to the plan.yaml file. Returns: dict: The data dictionary from plan.yaml. """ - data = self.__read_yaml(plan_yaml) + data = Plan.load(plan_yaml) if data is None: data = {} data["federated_flow"] = {"settings": {}, "template": ""} return data - def _read_or_initialize_data_yaml(self, data_yaml) -> dict: - """Read or initialize the YAML data. 
+ def _initialize_data_yaml(self, data_yaml) -> dict: + """Load or initialize the YAML data. Args: data_yaml (Path): The path to the data.yaml file. Returns: dict: The data dictionary from data.yaml """ - data = self.__read_yaml(data_yaml) + data = Plan.load(data_yaml) return data if data is not None else {} From 72874ad5d0a32cd350160a7e32907c6ef9d36e46 Mon Sep 17 00:00:00 2001 From: refai06 Date: Fri, 14 Feb 2025 09:38:19 +0530 Subject: [PATCH 3/7] Improvements added Signed-off-by: refai06 --- .../workflow/notebooktools/code_analyzer.py | 31 ++++++++++++++++--- .../workflow/notebooktools/notebook_tools.py | 22 +++++++------ 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/openfl/experimental/workflow/notebooktools/code_analyzer.py b/openfl/experimental/workflow/notebooktools/code_analyzer.py index 1c0a086c45..2a065c52fc 100644 --- a/openfl/experimental/workflow/notebooktools/code_analyzer.py +++ b/openfl/experimental/workflow/notebooktools/code_analyzer.py @@ -58,7 +58,7 @@ def __init__(self, notebook_path: Path, output_path: Path) -> None: # Change the runtime backend from 'ray' to 'single_process' self.__change_runtime() - def __get_exp_name(self, notebook_path: Path) -> None: + def __get_exp_name(self, notebook_path: Path) -> str: """Fetch the experiment name from the Jupyter notebook. Args: notebook_path (str): Path to Jupyter notebook. @@ -73,7 +73,10 @@ def __get_exp_name(self, notebook_path: Path) -> None: if match: logger.info(f"Retrieved {match.group(1)} from default_exp") return match.group(1) - return None + raise ValueError( + "The notebook does not contain a '#| default_exp Path: """Converts a Jupyter notebook to a Python script. @@ -216,7 +219,13 @@ def __extract_class_initializing_args(self, class_name) -> Dict[str, Any]: return instantiation_args def _extract_positional_args(self, args) -> Dict[str, Any]: - """Extract positional arguments from the AST nodes.""" + """Extract positional arguments from the AST nodes. 
+ Args: + args: AST nodes representing the arguments. + + Returns: + Dict[str, Any]: Dictionary of argument names and their values. + """ positional_args = {} for arg in args: if isinstance(arg, ast.Name): @@ -228,7 +237,13 @@ def _extract_positional_args(self, args) -> Dict[str, Any]: return positional_args def _extract_keyword_args(self, keywords) -> Dict[str, Any]: - """Extract keyword arguments from the AST nodes.""" + """Extract keyword arguments from the AST nodes. + Args: + keywords: AST nodes representing the keyword arguments. + + Returns: + Dict[str, Any]: Dictionary of keyword argument names and their values. + """ keyword_args = {} for kwarg in keywords: value = ast.unparse(kwarg.value).strip() @@ -241,7 +256,13 @@ def _extract_keyword_args(self, keywords) -> Dict[str, Any]: return keyword_args def _clean_value(self, value: str) -> str: - """Clean the value by removing unnecessary parentheses or brackets.""" + """Clean the value by removing unnecessary parentheses or brackets. + Args: + value (str): The string value to be cleaned. + + Returns: + str: The cleaned string value + """ if value.startswith("(") and "," not in value: value = value.lstrip("(").rstrip(")") if value.startswith("[") and "," not in value: diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py index f06240b8cd..63d26ddeeb 100644 --- a/openfl/experimental/workflow/notebooktools/notebook_tools.py +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -84,11 +84,11 @@ def export_federated( (archive_path, flow_class_name). 
""" instance = cls(notebook_path, output_workspace) - instance.generate_requirements() - instance.generate_plan_yaml(director_fqdn, tls) + instance._generate_requirements() + instance._generate_plan_yaml(director_fqdn, tls) instance._clean_generated_workspace() print_tree(output_workspace, level=2) - return instance.generate_experiment_archive() + return instance._generate_experiment_archive() @classmethod def export(cls, notebook_path: str, output_workspace: str) -> None: @@ -98,12 +98,12 @@ def export(cls, notebook_path: str, output_workspace: str) -> None: output_workspace (str): Path for the generated workspace directory. """ instance = cls(notebook_path, output_workspace) - instance.generate_requirements() - instance.generate_plan_yaml() - instance.generate_data_yaml() + instance._generate_requirements() + instance._generate_plan_yaml() + instance._generate_data_yaml() print_tree(output_workspace, level=2) - def generate_experiment_archive(self) -> Tuple[str, str]: + def _generate_experiment_archive(self) -> Tuple[str, str]: """ Create archive of the generated workspace @@ -121,7 +121,7 @@ def generate_experiment_archive(self) -> Tuple[str, str]: return arch_path, self.flow_class_name - def generate_requirements(self) -> None: + def _generate_requirements(self) -> None: """Extracts pip libraries from exported python script and append in workspace/requirements.txt """ @@ -152,7 +152,7 @@ def _clean_generated_workspace(self) -> None: if data_file.exists(): data_file.unlink() - def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> None: + def _generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> None: """Generate the plan.yaml Args: director_fqdn (str): Fully qualified domain name of the director node. 
@@ -187,7 +187,7 @@ def generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> No # Write the updated plan configuraiton to the plan.yaml file Plan.dump(plan, data) - def generate_data_yaml(self) -> None: + def _generate_data_yaml(self) -> None: """Generate data.yaml""" # Get flow class_name @@ -224,6 +224,8 @@ def _extract_flow_details(self) -> str: """Extract the flow class details""" flspsec = import_module("openfl.experimental.workflow.interface").FLSpec flow_details = self.code_analyzer.get_flow_class_details(flspsec) + if not flow_details: + raise ValueError("Failed to extract flow class details") return flow_details def _initialize_plan_yaml(self, plan_yaml) -> dict: From ffde95b1c894ec5002c11a60d27bf56dc2647adf Mon Sep 17 00:00:00 2001 From: refai06 Date: Sat, 15 Feb 2025 00:50:38 +0530 Subject: [PATCH 4/7] Added testcase Signed-off-by: refai06 --- .../workflow/notebooktools/notebook_tools.py | 2 +- .../301_MNIST_Watermarking.ipynb | 924 ++++++++++++++++++ .../test_artifacts/actual/.workspace | 2 + .../test_artifacts/actual/plan/cols.yaml | 5 + .../test_artifacts/actual/plan/data.yaml | 51 + .../test_artifacts/actual/plan/defaults | 2 + .../test_artifacts/actual/plan/plan.yaml | 20 + .../test_artifacts/actual/requirements.txt | 6 + .../test_artifacts/actual/src/__init__.py | 2 + .../test_artifacts/actual/src/experiment.py | 664 +++++++++++++ .../test_artifacts/expected/.workspace | 2 + .../test_artifacts/expected/plan/cols.yaml | 5 + .../test_artifacts/expected/plan/data.yaml | 51 + .../test_artifacts/expected/plan/defaults | 2 + .../test_artifacts/expected/plan/plan.yaml | 20 + .../test_artifacts/expected/requirements.txt | 6 + .../test_artifacts/expected/src/__init__.py | 2 + .../test_artifacts/expected/src/experiment.py | 664 +++++++++++++ .../testcase_export/test_script.py | 73 ++ .../MNIST_Watermarking.ipynb | 587 +++++++++++ .../test_artifacts/actual/.workspace | 2 + .../test_artifacts/actual/plan/defaults | 2 + 
.../test_artifacts/actual/plan/plan.yaml | 25 + .../test_artifacts/actual/requirements.txt | 7 + .../test_artifacts/actual/src/__init__.py | 2 + .../test_artifacts/actual/src/experiment.py | 380 +++++++ .../test_artifacts/expected/.workspace | 2 + .../test_artifacts/expected/plan/defaults | 2 + .../test_artifacts/expected/plan/plan.yaml | 25 + .../test_artifacts/expected/requirements.txt | 7 + .../test_artifacts/expected/src/__init__.py | 2 + .../test_artifacts/expected/src/experiment.py | 380 +++++++ .../testcase_export_federated/test_script.py | 95 ++ 33 files changed, 4020 insertions(+), 1 deletion(-) create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml create mode 100644 
tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults create 
mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py create mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py index 63d26ddeeb..f75cb2a6cf 100644 --- a/openfl/experimental/workflow/notebooktools/notebook_tools.py +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -205,7 +205,7 @@ def _generate_data_yaml(self) -> None: data = self._initialize_data_yaml(data_yaml) # Initiaize runtime name - runtime_name = "local_runtime" + runtime_name = "runtime_local" # Process aggregator information using CodeAnalyzer runtime_created = self.code_analyzer.process_aggregator( diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb b/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb new file mode 100644 index 0000000000..dcd327ed1a --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb @@ -0,0 +1,924 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "dc13070c", + "metadata": {}, + "source": [ + "# Workflow Interface 301: Watermarking\n", + "\n", + "[![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/intel/openfl/blob/develop/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "8f28c451", + "metadata": {}, + "source": [ + "This OpenFL Workflow Interface tutorial demonstrates Watermarking of DL Model in Federated Learning. Watermarking enables the Model owner to assert ownership rights and detect stolen model instances. \n", + "\n", + "In this tutorial we use Backdooring to embed Watermark on a DL model trained on MNIST Dataset. This involves training the DL model with both the actual training data and the backdoor (a.k.a Watermark dataset). Watermark dataset is designed by the Model owner and consists of mislabelled input and output data pairs. Watermarked model performs normally on the Target dataset but returns incorrect labels on the Watermark dataset. Watermark dataset needs to be hidden from the Collaborators and Watermarking embedding needs to be performed at a trusted entity (Aggregator in this case)\n", + "\n", + "This workflow demonstrates: \n", + "- Flexibility to define the Watermark embedding steps as Aggregator processing steps without any involvement of Collaborators\n", + "- Ability to define Watermark dataset as a private attribute of Aggregator entity\n", + "- Flexibility to select a subset of collaborators on which Model Training is performed every training round\n", + "- Visualize the Workflow as a Graph\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "a4394089", + "metadata": {}, + "source": [ + "# Getting Started" + ] + }, + { + "cell_type": "markdown", + "id": "ff167e44", + "metadata": {}, + "source": [ + "Initially, we start by specifying the module where cells marked with the `#| export` directive will be automatically exported. 
\n", + "\n", + "In the following cell, `#| default_exp experiment `indicates that the exported file will be named 'experiment'. This name can be modified based on user's requirement & preferences" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e9a73bd", + "metadata": {}, + "outputs": [], + "source": [ + "#| default_exp experiment" + ] + }, + { + "cell_type": "markdown", + "id": "e69cdbeb", + "metadata": {}, + "source": [ + "Once we have specified the name of the module, subsequent cells of the notebook need to be *appended* by the `#| export` directive as shown below. User should ensure that *all* the notebook functionality required in the Federated Learning experiment is included in this directive" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "857f9995", + "metadata": {}, + "source": [ + "First we start by installing the necessary dependencies for the workflow interface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7475cba", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "# !pip install git+https://github.com/securefederatedai/openfl.git\n", + "!pip install -r workflow_interface_requirements.txt\n", + "!pip install torch\n", + "!pip install torchvision\n", + "!pip install matplotlib\n", + "!pip install git+https://github.com/pyviz-topics/imagen.git@master\n", + "!pip install holoviews==1.15.4\n", + "\n", + "\n", + "# Uncomment this if running in Google Colab\n", + "#!pip install -r https://raw.githubusercontent.com/intel/openfl/develop/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt\n", + "#import os\n", + "#os.environ[\"USERNAME\"] = \"colab\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "7bd566df", + "metadata": {}, + "source": [ + "We begin with the quintessential example of a pytorch CNN model trained on the MNIST dataset. 
Let's start by defining our dataloaders, model, optimizer, and some helper functions like we would for any other deep learning experiment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9bd8ac2d", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "import torch\n", + "import torchvision\n", + "import numpy as np\n", + "import random\n", + "import pathlib\n", + "import os\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "import PIL.Image as Image\n", + "import imagen as ig\n", + "import numbergen as ng\n", + "\n", + "random_seed = 1\n", + "torch.backends.cudnn.enabled = False\n", + "torch.manual_seed(random_seed)\n", + "\n", + "# MNIST Train and Test datasets\n", + "mnist_train = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=True,\n", + " download=True,\n", + " transform=torchvision.transforms.Compose(\n", + " [\n", + " torchvision.transforms.ToTensor(),\n", + " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "mnist_test = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=False,\n", + " download=True,\n", + " transform=torchvision.transforms.Compose(\n", + " [\n", + " torchvision.transforms.ToTensor(),\n", + " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self, dropout=0.0):\n", + " super(Net, self).__init__()\n", + " self.dropout = dropout\n", + " self.block = nn.Sequential(\n", + " nn.Conv2d(1, 32, 2),\n", + " nn.MaxPool2d(2),\n", + " nn.ReLU(),\n", + " nn.Conv2d(32, 64, 2),\n", + " nn.MaxPool2d(2),\n", + " nn.ReLU(),\n", + " nn.Conv2d(64, 128, 2),\n", + " nn.ReLU(),\n", + " )\n", + " self.fc1 = nn.Linear(128 * 5**2, 200)\n", + " self.fc2 = nn.Linear(200, 10)\n", + " self.relu = nn.ReLU()\n", + " 
self.dropout = nn.Dropout(p=dropout)\n", + "\n", + " def forward(self, x):\n", + " x = self.dropout(x)\n", + " out = self.block(x)\n", + " out = out.view(-1, 128 * 5**2)\n", + " out = self.dropout(out)\n", + " out = self.relu(self.fc1(out))\n", + " out = self.dropout(out)\n", + " out = self.fc2(out)\n", + " return F.log_softmax(out, 1)\n", + "\n", + "\n", + "def inference(network, test_loader):\n", + " network.eval()\n", + " correct = 0\n", + " with torch.no_grad():\n", + " for data, target in test_loader:\n", + " output = network(data)\n", + " pred = output.data.max(1, keepdim=True)[1]\n", + " correct += pred.eq(target.data.view_as(pred)).sum()\n", + " accuracy = float(correct / len(test_loader.dataset))\n", + " return accuracy\n", + "\n", + "\n", + "def train_model(model, optimizer, data_loader, entity, round_number, log=False):\n", + " # Helper function to train the model\n", + " train_loss = 0\n", + " log_interval = 20\n", + " model.train()\n", + " for batch_idx, (X, y) in enumerate(data_loader):\n", + " optimizer.zero_grad()\n", + "\n", + " output = model(X)\n", + " loss = F.nll_loss(output, y)\n", + " loss.backward()\n", + "\n", + " optimizer.step()\n", + "\n", + " train_loss += loss.item() * len(X)\n", + " if batch_idx % log_interval == 0 and log:\n", + " print(\"{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}\".format(\n", + " entity,\n", + " round_number,\n", + " batch_idx * len(X),\n", + " len(data_loader.dataset),\n", + " 100.0 * batch_idx / len(data_loader),\n", + " loss.item(),\n", + " )\n", + " )\n", + " train_loss /= len(data_loader.dataset)\n", + " return train_loss" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "f0c55175", + "metadata": {}, + "source": [ + "Watermark dataset consists of mislabelled (input, output) data pairs and is designed such that the model learns to exhibit an unusual prediction behavior on data points from this dataset. 
The unusual behavior can then be used to demonstrate model ownership and identify illegitimate model copies\n", + "\n", + "Let us prepare and inspect the sample Watermark dataset consisting of 100 images = 10 classes (1 for each digit) x 10 images (per class). Watermark images were generated by superimposing a unique pattern (per class) on a noisy background (10 images / class). (Reference - WAFFLE: Watermarking in Federated Learning https://arxiv.org/abs/2008.07298)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcad2624", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "watermark_dir = \"./files/watermark-dataset/MWAFFLE/\"\n", + "\n", + "\n", + "def generate_watermark(\n", + " x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir\n", + "):\n", + " \"\"\"\n", + " Generate Watermark by superimposing a pattern on noisy background.\n", + "\n", + " Parameters\n", + " ----------\n", + " x_size: x dimension of the image\n", + " y_size: y dimension of the image\n", + " num_class: number of classes in the original dataset\n", + " num_samples_per_class: number of samples to be generated per class\n", + " img_dir: directory for saving watermark dataset\n", + "\n", + " Reference\n", + " ---------\n", + " WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298)\n", + "\n", + " \"\"\"\n", + " x_pattern = int(x_size * 2 / 3.0 - 1)\n", + " y_pattern = int(y_size * 2 / 3.0 - 1)\n", + "\n", + " np.random.seed(0)\n", + " for cls in range(num_class):\n", + " patterns = []\n", + " random_seed = 10 + cls\n", + " patterns.append(\n", + " ig.Line(\n", + " xdensity=x_pattern,\n", + " ydensity=y_pattern,\n", + " thickness=0.001,\n", + " orientation=np.pi * ng.UniformRandom(seed=random_seed),\n", + " x=ng.UniformRandom(seed=random_seed) - 0.5,\n", + " y=ng.UniformRandom(seed=random_seed) - 0.5,\n", + " scale=0.8,\n", + " )\n", + " )\n", + " patterns.append(\n", + " ig.Arc(\n", + " 
xdensity=x_pattern,\n", + " ydensity=y_pattern,\n", + " thickness=0.001,\n", + " orientation=np.pi * ng.UniformRandom(seed=random_seed),\n", + " x=ng.UniformRandom(seed=random_seed) - 0.5,\n", + " y=ng.UniformRandom(seed=random_seed) - 0.5,\n", + " size=0.33,\n", + " )\n", + " )\n", + "\n", + " pat = np.zeros((x_pattern, y_pattern))\n", + " for i in range(6):\n", + " j = np.random.randint(len(patterns))\n", + " pat += patterns[j]()\n", + " res = pat > 0.5\n", + " pat = res.astype(int)\n", + "\n", + " x_offset = np.random.randint(x_size - x_pattern + 1)\n", + " y_offset = np.random.randint(y_size - y_pattern + 1)\n", + "\n", + " for i in range(num_samples_per_class):\n", + " base = np.random.rand(x_size, y_size)\n", + " # base = np.zeros((x_input, y_input))\n", + " base[\n", + " x_offset : x_offset + pat.shape[0],\n", + " y_offset : y_offset + pat.shape[1],\n", + " ] += pat\n", + " d = np.ones((x_size, x_size))\n", + " img = np.minimum(base, d)\n", + " if not os.path.exists(img_dir + str(cls) + \"/\"):\n", + " os.makedirs(img_dir + str(cls) + \"/\")\n", + " plt.imsave(\n", + " img_dir + str(cls) + \"/wm_\" + str(i + 1) + \".png\",\n", + " img,\n", + " cmap=matplotlib.cm.gray,\n", + " )\n", + "\n", + "\n", + "# If the Watermark dataset does not exist, generate and save the Watermark images\n", + "watermark_path = pathlib.Path(watermark_dir)\n", + "if watermark_path.exists() and watermark_path.is_dir():\n", + " print(\n", + " f\"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... \"\n", + " )\n", + " pass\n", + "else:\n", + " print(f\"Generating Watermark dataset... 
\")\n", + " generate_watermark()\n", + "\n", + "\n", + "class WatermarkDataset(torch.utils.data.Dataset):\n", + " def __init__(self, images_dir, label_dir=None, transforms=None):\n", + " self.images_dir = os.path.abspath(images_dir)\n", + " self.image_paths = [\n", + " os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir)\n", + " ]\n", + " self.label_paths = label_dir\n", + " self.transform = transforms\n", + " temp = []\n", + "\n", + " # Recursively counting total number of images in the directory\n", + " for image_path in self.image_paths:\n", + " for path in os.walk(image_path):\n", + " if len(path) <= 1:\n", + " continue\n", + " path = path[2]\n", + " for im_n in [image_path + \"/\" + p for p in path]:\n", + " temp.append(im_n)\n", + " self.image_paths = temp\n", + "\n", + " if len(self.image_paths) == 0:\n", + " raise Exception(f\"No file(s) found under {images_dir}\")\n", + "\n", + " def __len__(self):\n", + " return len(self.image_paths)\n", + "\n", + " def __getitem__(self, idx):\n", + " image_filepath = self.image_paths[idx]\n", + " image = Image.open(image_filepath)\n", + " image = image.convert(\"RGB\")\n", + " image = self.transform(image)\n", + " label = int(image_filepath.split(\"/\")[-2])\n", + "\n", + " return image, label\n", + "\n", + "\n", + "def get_watermark_transforms():\n", + " return torchvision.transforms.Compose(\n", + " [\n", + " torchvision.transforms.Grayscale(),\n", + " torchvision.transforms.Resize(28),\n", + " torchvision.transforms.ToTensor(),\n", + " torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize\n", + " ]\n", + " )\n", + "\n", + "\n", + "watermark_data = WatermarkDataset(\n", + " images_dir=watermark_dir,\n", + " transforms=get_watermark_transforms(),\n", + ")\n", + "\n", + "# Set display_watermark to True to display the Watermark dataset\n", + "display_watermark = True\n", + "if display_watermark:\n", + " # Inspect and plot the Watermark Images\n", + " wm_images = np.empty((100, 28, 
28))\n", + " wm_labels = np.empty([100, 1], dtype=int)\n", + "\n", + " for i in range(len(watermark_data)):\n", + " img, label = watermark_data[i]\n", + " wm_labels[label * 10 + i % 10] = label\n", + " wm_images[label * 10 + i % 10, :, :] = img.numpy()\n", + "\n", + " fig = plt.figure(figsize=(120, 120))\n", + " for i in range(100):\n", + " plt.subplot(10, 10, i + 1)\n", + " plt.imshow(wm_images[i], interpolation=\"none\")\n", + " plt.title(\"Label: {}\".format(wm_labels[i]), fontsize=80)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d82d34fd", + "metadata": {}, + "source": [ + "Next we import the `FLSpec`, `LocalRuntime`, placement decorators (`aggregator/collaborator`), and `InspectFlow`.\n", + "\n", + "- `FLSpec` – Defines the flow specification. User defined flows are subclasses of this.\n", + "- `Runtime` – Defines where the flow runs, infrastructure for task transitions (how information gets sent). The `LocalRuntime` runs the flow on a single node.\n", + "- `aggregator/collaborator` - placement decorators that define where the task will be assigned\n", + "- `InspectFlow` – Utility to visualize the User-defined workflow as a Graph (only currently compatible in flows without loops)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89cf4866", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "from copy import deepcopy\n", + "\n", + "from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator\n", + "from openfl.experimental.workflow.runtime import LocalRuntime\n", + "from openfl.experimental.workflow.placement import aggregator, collaborator\n", + "from openfl.experimental.workflow.utilities.ui import InspectFlow\n", + "\n", + "\n", + "def FedAvg(agg_model, models, weights=None):\n", + " state_dicts = [model.state_dict() for model in models]\n", + " state_dict = agg_model.state_dict()\n", + " for key in models[0].state_dict():\n", + " state_dict[key] = 
torch.from_numpy(np.average([state[key].numpy() for state in state_dicts],\n", + " axis=0, \n", + " weights=weights))\n", + " \n", + " agg_model.load_state_dict(state_dict)\n", + " return agg_model" + ] + }, + { + "attachments": { + "image.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiQAAAHCCAYAAADfBtJyAADpl0lEQVR4nOxdB3xjxfFe9WLJvdfzna83jqMTem+hd0JCC5B/ICEhQEgjEFoSSGgJPaG3QOglJPR6vVf33m1Zlq2u/3yr93yyLN/ZPvvkMt/93kl+emXfvtnZb2ZnZ/WhUEgwGIzhgdqNZsXm0n2e39J1wae13YfVd7lzW1zejHiXi7HnoBGaUJbN1JSbaKo/Nt/4n3PnZ760aNb0dfEuF4MxUaGPdwEYjImGru7uxL+88811f93s+6nD7UsiTq+Jd5kY8UBI1Hf15mJbWSeWPrm+49KfHtj11+sOn/8Xo8HgjXfpGIyJBiYkDMYw4PP5DPd8tOHnd6113+QNBI3xLg9jfACktNHly/7tx7W3Wry9vVcfu8/fDQaDL97lYjAmEpiQMBjDwNfrtx74tzWdP2IywogFyMVdq503HT6v6ZNFxfk8fMNgDANMSBiMYeD5Ms8FrS5verzLwRi/aHC6c/69pvp0JiQMxvDAhITBGAa+qmw7KN5lYIx/fLm97uB4l4HBmGhgQsJgDAO1nb35gmNYGbtAV68vMd5lYDAmGpiQMBjDQDh2RBfvYjDGOTAtPN5lYDAmGpiQMBgMBoPBiDuYkDAYUwQWvU4cWZIu5mfZ5d9dHr/4uLRFbGt1iX0LksXh0weP1Q2GQuJvX1eKHl+gb9+++XTOjHThcPvEo99W9Tu+MMkijpmVIVIsRqHVCBGg8x29PvFZRZvYTvdDOsaZaQni9AU5g94zRP+eX10n6rrcu/fgDAZjQoAJCYMxBTArPUE8dOoicXBxqrAYwkNOgWBIVHX0iMMf+UocVpwm7j5x3qDn+4NB8dTKmj5Cgmvce/J88R06zxsIiq8q28WGJmff8fsXpogHTl3Yd6+Qcr8uIi+PE3n5w8fbJTHa2T1x/BcV7UxIGIwpAiYkDMYUwDUHFYujZmaIeurcr31jlWjs9oi5GTZRlGIRLS6PeG5NnfiCSAWwd16y+Msp84XHHxQXv0THOj2SUHT27sjzdfysDLE0P1keY9BpxDUHF4tr39wg/wbgFdHTf/CKnPHMcrnv3MW54pJ9CsW135kuvq3pFJ+Ut4qDHvpc/pZpM4kXL1wqmcv172wUq+oc8p4bI0gOg8GY3GBCwmBMARQmW4WGSEKvNyBW1nVKQvJVVXvf7yAq9YonwqzXiWAo7KEAMajq7O13LbNeKy5aki9M9Pn8qlpx2Ix0cczMTJGXaBbl7T39jvXTNZbVdIhuuu+aeoc4uiRDTE9LEHvnJonXNjaIr6s75HEFSRZ5TyyttYlIiLqfwWBMHTAhYTCmAN7c1CiOm50pSjISxPqfHSE+2Nos3t7SJJ5fUyt8geEtsDk73SaOnZUp2lxe8eiyKoH5JBftXSBOn5ct7vmiPOY5GLo5bX62yLSbMANF1Hb1xjyOwWBMXTAhYTCmAP65qkYGsV6+X6E4qiRDnLUoV5yxMEdcf+gMcfrTy0Vpm2tI1zHqtHLYxWbSi39vaBDLajrFgiy7+B4RklMX5IinVtWK1p4d68qlJhhF6+9OkN/1Oo3QajTi1XX14nUiSAwGgxEJJiQMxhQAhl9eWV8v3t/WLGakWsUFe+WLy/cvEguyE8Vl+xaKmz/YLIdLdoXCZIs4fnam/H7S3Cyx9rrDhc0YViOLc5LkzJv36B4qfIGgnFmzJDdJpBM5WVnbKX729kYZ
l8JgMBiRYELCYEwBpFoMctikudsj1jR0iVpHqZwJc+j0NJFrN8kA1KEM3RxE5xQToXF5A3Qtr5wOjGm/CUadSKZ7nDQnqx8hcbr94synl4nT5ueIh05fJANmEX/y589KhS84vKEiBoMxucGEhMGYArjrhHniyBnpYlltJ5EJPxEUo5wlA0KxvtEpAsFdX8NApOXifQqEQa8Vzy2vFlf9e52c8gv85shZ4pZjZ4vj52QK2/v91Qpox7/WN4gDi1LE1QcWi58fOkN8U90uPi5vG4MnZTAYExVMSBiMSQ5MwS1rc0lCcOaCHGEkQoHZL9tbusWzq2rFg19XSGKyK2BIZmF2ovD6gzJI1hvBYlYQ0en2BORMm6NmDEyw1usPiD99WiZOnJ0lilKt4o7j54kTnvxGdLp9A45lMBhTE0xIGIxJDoyM/OmzMvG3byplUCqm/4J/gFBg6CWajHxR1S7ybv/PgNwjaxu7xLx7Ppbfu6KIxIelLaL47v/KZQfhgcG5OX8IXwP3ACo7esTi+z6VeUsQ0+L0+PvOR/Kzwjs+DF/bwySFwZiKYELCYEwBgHREEoCdAYGobREzZXa1H4DHpT3qN4+//98gJ45BPCIo32DXZjAYUwNMSBgMBoPBYMQdTEgYDAaDwWDEHUxIGAwGg8FgxB1MSBiMSQ67SS/2yUsWK+o6hxxHMtbAOjgo0/Lazn6zdRgMxtQFExIGYxIDM2rOXZQrbjlmtrj6tXXirS1N8S6SRLLZIP544lxx6lPL+6WaZzAYUxdMSBiMSQyrQSfOIULy8tp6uXbNO1ub5YwWZFY9Y36OTI6Gxe4+K28T/97UKDO6XrR3vpiemiCPw3mY7vu9JXlidqZdzrR5blWtaOr2iEv3LRTZdrPY1tItHlteJTO9lqRaxcX7FEqvjNPtE08srxbtPT5xxf5FMu18Q5dbPPR1pSwbpiD/4bg5osbhFo99WyXoT3H1AdOEncjKt1Ud4pUN9cJG5fz+0gIxLcUqenwB8cSyalHR0bPzh2YwGBMSTEgYjEmMJTlJMg/J48uqxP2nLhRLc5PE8rpOccW+RWJRTqK48t9rxalzs2XKdxCSO4+fK8rbe8T172wUNxxWIg6eliLK2l3iUiIZpzy1TLQRubAYtOKJs/YSa+sc4s6Pt4t7T54vh4I+3N4iHjhtobj3szLxSXm7eObcJaIkLUF87GgV/yBi4iJC8egZi8QFi3PFG5ub5FThX3+wRXpIEog4vXLRPuIZIjsfbGsWD1BZPf6AWEdk6LR52eKMZ1YIJ/KbcLZ5BmPSggkJgzFJAQ/EudT5P72qRmxtdYl1DdS5z8+WHo8T5mSKe4g4RK5fY6LjD5+RLv702bcycVksSO+KQS8OKkoVq4mQYIE9EIvDp6fJdXISjHrx39JWoUN6WAUzUhPEHUR02nq9YlaGTdR3uQdcN9VqFNNTreK9rc0yeyvIzekLcsS21h2rEA9WJgaDMTnAhITBmKSYkWYVhxanCZNBJ/YtSBGFSRYxP8su7vykVAaSmvXafsejuwe5AJHZGUBK3Ir3otbhFstrOkVHr08SCp1G04+MABfslUckpFdc/+5m8cvDS+i+ugHXBNnA/ZHFFcBCgPC6BNglwmBMGTAhYTAmKQ6Zlia9Ij98da38O8tmEu9esr/YNz9ZvLmxUZy/V74oa+8R3ylOlSQCJOWdTU3iB0sLxHOra8U+dNxXle0Drotsq6+tbxCHEdl5bk2dAIfAvs3NTlHT2Su+v3eB2N7aLQqSzfL4RqdHroGzgMhQktkgerwBSWgaaP/B8LQ0OCShQRzLZfsWio/LWuVCgPd/US5jVhgMxtQAExIGYxhIMGpdLo9IiHc5hgLEfnxdvYNQIDX7Te9vFg6PTzy1qlZUEnnAEE0wEOrzRNzyv63ixNmZ4mAiKW5lnZtu+rzn8zL5CfiCIXHb/7aJ4+i4w6anCY8/KOocbtHi8opr39wgvjsvS8zJtMtF+OD3wJAR
4kQOnpYqtrR0i2U1HURg/OLnb28Ux9M1Ei168fbmJvEz+vu0BeFAWwwnfUPHJZn14oGvKkSvLxCXOmQwGHsOTEgYjGFgWmpCZXNDT2a8yzEU/K+0td/fGI5BbAZw8uwssXd+knATaViUmyRu/mCz3H/B4jyRYTcJg1YjshJN4sV36yUZeGV9Q79rIUD1tQ3992GGzg+W5gv4NDBLB/EfX1V1yHu8GnUsgNkyf/+mst8+eGYiAeLy742NI3n8uCIvxVYX7zIwGBMNTEgYjGHguLlZH6xorNwnGArtPNBinOOTijaxssEhV+fFDJxOdzhh2r+IOFiN4RiP+74sF13uoSdSA3l4YkWNTHoGhwuGcUBGpho0GhE6ZmHRh/EuB4Mx0cCEhMEYBs6blfTiS6v0527r9M2Kd1l2B91ev9yigRkunYOsyLsrBHjFXolF2YnrTpid9V68y8FgTDQwIWEwhoF5xYWbfnOI47Yff1j7oMPtS4p3eRjjC8kWQ+cfj8i/oSg3uyreZWEwJhqYkDAYw8T5B857IS3J1vbT9yv+ur2te2YoJDS7Posx2bFPfvKKPx5RcMPh86d9otFoeL4ygzFMMCFhMIYJnU4XOGFh8XuHzMj6/ON1ZUd8um77Yd1ujy3e5YoHNFqd0Z+Qur856Nnqc3U1x7s88UBSgtlx7N7z/nPA7MJvEqwW167PYDAYscCEhMEYIWxWa/cpByx8C1u8yxIvtLe3p69bt+619PTsRxcsWPB+vMvDYDAmLpiQMBiMESMUCmkCgYAlNMFnHTEYjPiDCQmDwRgxiIzoaTObTKbueJeFwWBMbDAhYTAYIwY8JLTpCTzfl8Fg7BaYkDAYDAaDwYg7mJAwGAwGg8GIO5iQMBgMBoPBiDuYkDAYjBHD7/cjqNXCQa0MBmN3wYSEwWCMGJjuy0GtDAZjNMCEhMFgMBgMRtzBhITBYAwLmOrb29trxafP57PSLo3f77e6XC4b1nAxmUxupNePdzkZDMbEAhMSBoMxLHg8HtO2bdv+0N3dPQfxI16vN3X79u336/V6F22d8+fP/7Hdbm+LdzkZDMbEAhMSBoMxLJhMJg8RkS3Nzc3/FwwGDdjX2tp6CH2EiouL72YywmAwRgImJAwGY1jAsMzs2bNfbGhouN7tdpeo+4mIbM3Ozn4inmVjMBgTF0xIGAzGsGGz2RwZGRl/q6uru1vxkoSSkpKeys3NLYt32RgMxsQEExIGgzEiFBcXv9zR0XFRd3f33omJieunT5/+HLwn8S4Xg8GYmGBCwmAwRoTMzMy6jIyMh4mQPJKVlfUX+l4b7zIxGIyJCyYkDAZjxCgqKnozEAgcsnDhwmfYO8JgMHYHTEgYjBEAOThKq2tLXtvYdMZnle2H1rS7Cjp7vcnxLteehkar1ZotNkvvG69UxLssexomvc5TkJJQszAvZf2Z0xNePXDRnK8NBoMv3uViMCYqmJAwGMOE2+02v/z56nNuXd3z24r2nuJgKKQVQke/WOJdtPjAiz7YkhrvYuxx+IUobfCVfNzQfMRzm4wXfq+085kbjl7wx5y0lIZ4F43BmIhgQsJgDAPwjDz7yaqLrvm8/QG3P2iOd3kY4wNtPd60+1f7rq3ylhY9d9bCCy0Wc2+8y8RgTDQwIWEwhoE1lfV73bba9RsmI4xowFP27w0Npz9daL74h4cueJRjahiM4YEJCYMxDDy+qvHy6s7ewniXgzF+8cia9itPmd3yVm52Zn28y8JgTCQwIWEwhoEvK9oOjncZGOMb21tdM1vb29OZkDAYwwMTEgZjGKjqcBWFA1gZjNjo9vptHq/PFO9yMBgTDUxIGIxhwBcQhniXgcFgMCYjmJAwGAwGg8GIO5iQMBgTAMlmg3j30v1FYbJVGHQaua/F5RWflLaKx5dXizUNDqHTaMTdJ8wVZyzMFQnG8LBSry8oKjtc4vFl1eLNTY2iy+MXBq1GvPmD/cTeeckD7tPS7RUn//NbOqdnwG8v
nLe3OHJmxqBlXFbTIc57fqU4e0GuuOnImbLMVCThC4REQ5db/Gt9vXhmda2op++RyLaZxD/PWSKW5CWJf66oETe/v1kEQuEJKnimh05bKE5fkCOeW1UrbnhvkzDqtGLjz48Qgg655cMt4qlVOzLW/2Bpgbj7xHniy8p2cdGLq0SPLzDsumYwGPEBExIGYwIAnfCinERkBxWbmpzCScSiMNkirj5omjhxbpY44+llYgPtn5VhE8WpVtnpg1TYjHpxYFGqOKAwVTyxrEpc+9YGuppGpFiMIiPBJMrbXaLR6em7T3uPlwhEMGYZ6uia21tdOF0UJFnk/VGODY1OTHkVNZ1uAR5RQPtnpicIlzcgfwOBQrnuILJ03KwMccYzK0Sne0dC0+/OzRaHz0inZ9OKMxfmiKdWVotNzd3hH+leiWa9yCTSYjfp8afcMhKMRLr04o8nzZek618bGmQZLAadPDbZYhCasXsdDAZjDMCEhMGYQOiijvyKV9eIZTWdYv/8ZPHChfuIohSLOGF25o5OnPDSmjrpTUikTvzXR80S1xw8XZy2IEfc/2UFkZCw9wNeiLs/KRWPEVEZCq5/d5P8REd/85Ezxa3HziHC0SWOfPQr4fYH+35Tsb6hSxz12FdCq9GIC5fki3tPni+Jx4GFKeK9bc3yGBCIi/cpEDqtRlRQuUCmTp6TTc9SOqQygXzc990FotbRK76u7hjSOQwGY3yCCQmDMUGxuaVbdPb6xLQUq0g0x/YItNPva+odIhgMSS+L3bTnZwhh2GRVXaf8tBFBSrHsiAs+kggKPD/1Dre4mQjPE+cukV6Sx5dXybLvDH56JicRtOxEs3jo9EXilH8uG+tHYTAYYwgmJAzGBIJZrxNnL8qTQzDnUMe9ODdReAJB8XVVe1/cBbBXXpL40YHFIp8668v3LxJGvVZsrnGKjRFeFL1WIx4+Y5H4G3XmKn753ibx0NeV4qQ5mSLbHk5Gi6GQzyvaxPpG564LGMGKcujeVx8wTcazXLy0QHozmrs94vPK9vChdOyV+xVJkvKP5dXitU2N4lZHr5iXZReHFqeJ1+nvncFPz/2LdzaJ8/fKk7EtD5++kMrZPpRqZDAY4xBMSBiMCQQrde7XHzpDfnd5/WJtfZd4fFmVeH9bS7/jjpiRLjcANOWNjY3il+9vEr2Yt6zVhvfTD4jxqOrcsewKhk1sdI+fHTJDxp4AiCn56Zvrh0ZIIoDhl3tPWdD3N4ZwrqXr1ClBrfMz7OIoIhJuKhO8IcfNyhSt3V5Rkm4TP9incJeEBOjs9YqfvLlBvHXJ/uIYOn8WnctgMCYmmJAwGBMICCL95XubxZYWp+jxBkQ1kQm1g4fHQ8Wr6+vFe1ubxe+PmSNyk8wy9qTO0X92CzwqD35VMSCGxKDTEnHYIJLMYfUQDCH7aLcYEiJWb9lIBOam9zaJu06cJ70eIfqHoFh4XBBXcsq8LEmwgFuOmd3vMkfMSBNZNpNo7fHu8nYbm53SswNPz8wMJiQMxkQFExIGYwIB3orltR0yqHVnqO7oFU+vrJEjKI+cuVicsziPzuuUBCQSFr1WzmLpA/Xwvf6gWFG78+sPBQ4iQf8tbRFXv7ZWvPq9fcWC7EQiSLPF/72xXk4JPn1Brjzu2VW14uPyVvk9hfb/7NAZIsduFifNyRJPr6oZ0r1eWd8g5mXaxa+PniXJDoPBmHhgQsJgTFLAe/Ds6lpx2rwccdK8LPGrI2fJ/Bzq0As8KvedulBuKuAN+f6LK8Wza+pGrRyIGXnkm0rxq6Nmi0v2LRQfbm+RwbjIO9Ll9os/frq9X5kOKU4Vp87PEd+lMr+6oWFI94DX5a9flosDilLEsbMyR63sDAZjz4EJCYMxAeD2B8RzRC6QZKzVFXsYAzEh/ytrFW3U2cMbAnKB6bg3f7BJ1DndMh8Ihk7WNXaJd7Y0yVk6A68R6psWvDOsa+gST62sEeVtPf2CaYG1ym9lba6+3/5GhASBtZl2
syhKsQqb0SvJUgUdU9G+I4YFM2eQHK2DiEpHj1eW+bPyNuGh5/6qql0+E47BufCERMa/dNBz/+4/W0S90yO2NDvlcQwGY+KACQmDMQGADKtXvrZup8eg87/vi/IB+9c1Ounctf323fa/bSMuC7r5tzY3yS3Wb29ubpRbJBqIJNz43uZ++55YUR3z+ghmjQxoffjbKrmpCBA5uWqQuvimppO21UN8EgaDMZ7AhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwZjEwAq6tx87R2xu7hZPrqwWoXEy8eSCxXkyAPaFtaM3vZjBYExsMCFhMCYxFmcnilPmZYu9cnvlVN/Gbo/cH15oTy+Q3NUbCMkMsMjlYSYCg7VlkFrM4w/K/TjWphyLacTdtM9s0Mk1arBoH2YAYYot8pEl0H6s4AuygUyyWFDPSn+rGVkdbr9M7laUbBEJdM2MBKPopuOQ0h7Xw7kBuhaOU8uTYAzfG9dy0bEMBmNyggkJgzFJgU789AU54q+fl4sjS9LFIcVp4pX19UJHzOH3R88SGTaTTD1/+PQ08cPX1onKjh7xz7P3knk8QEaW5CaKs55dKc5blCNOmpstVtR1im0t3eKjslbx2BmLRXO3V6QTofiqqkPc+0WZmJNuE3efME+sqneI6WlW0ehwixve3ywOp/vunZ8sFmTZxeZmp7jjk+1yEb59C5LFFfsXiQ+3tchF9x48daFMA1+UbBVvb24Uz62pE9cfMkPMpfOwf3WdQ3xAxwbHi5uHwWCMKpiQMBiTFCVpCeLAwhTx0FcVcgG7s4icvLaxQZwyJ0vMzrCJi15aLT0bWDcGQzs/PrBYNLu8Ml9IhtUoXv3ePpLUpND3lURGbv9ou/SC/OqImfK4n7y9QXo6Xjx/qXj420q5Hg0IzzOra8W5i3LFkcrifkjAlpVolnlSzqP9f0GuFOIUnxCxufOTUnnNO46bK5bVdoo76B6z0hPEU+cukVla04jwfFTaOmjOEgaDMXnAhITBmKQ4Ynq6JBzzsu3CbNSJ+fS5NDdJZNpMMhMqhkAwHANgiKaAyEWtwy2HVAaDlo7MT7KIhdmJ0huCVO/VdA6GZeAtURf6U2Ez6sUzRC6Q2Ozb6g5xydKCAWvN4O8cu0l8Wt4mSQuGgLAicR6RGAaDMXXAhITBmIRA7MXJ87LEf7a3yCEaDM2UtfeIo2dmiFV1DnHK3CyRZjXKGBAddf4YBUG69QOLUmUciBozEg0MlyBAFnEev/pgsyQ1ero+iEQN3WNupk18QsQC8SlAqsVAm1GmfUesijrcAqJkU45BGbAKMNLagyBhYb1ur1/URK1OzGAwJjeYkDAYkxDwYICIYMVfNZDVRkTjmoOLxX1floeHck5bKOocvcJuDAehvri2XhxEhOQB2t/j8fd5MkA+1HVh8P+TK6rFfacsEI+cvli09njFyppO8cyaWnHv5+XilqNnEymxi1y7SbTRb/VOt1ydGN6U9l6fXIcHpOSj8lbxh2PniHtOni/e2tgor/nnk+aLB05dIFf8/eMnpURggsJPW/RaOQwGY3KCCQmDMQxk2ozNFc5AcbzLsStgcb0T//Ftv30vra+XG7wnD39TKRqIqBxWnCb2ykkSDrePyEZI3PDeJkkaLlqSL4dmfERE7o1aHwczby7915oB92wg8nHxy6uk1+TRMxbLRfpAZL7/ysBjV9Y5xAlR5bvwxVUDjvtF1Po3EwF6rcav02l5OhCDMUwwIWEwhoH52UkbK5zt456Q7AwpFoO4dN8iGfOB1X1veHeTqO9yixlpCeKK/Yrkqrwef0Bc9/ZGOR13qFiSmySOnZUpdFpNOO/JFA1EJSJXm2S3O+JdDgZjooEJCYMxDJwzL/3lj6scR7i8gYR4l2WkwMq7t380cLXf0jaXuPG9TSO+LuJVsE11HFFg/TgnK6Mh3uVgMCYamJAwGMPAGXsVvvbBlvrjnt/WfUEoJGKE
fTKmMnITzfU3H1Fyh9Vi6Yl3WRiMiQYmJAzGMJBgtbpuP3Hhrzo0ZSnvbmk+Md7lYYwfgIz8/bjCq2fkZJTFuywMxkQEExIGY5goys6oev2i5NP++cWmHzyxqeuyNfWOvTz+oCne5dqTsGiFSDWGRIpBCIM2JDxBITq8GtHu08jvUwUajQgVp1grTipOeOcXh8/6U35Gaq1Go+FpQQzGCMCEhMEYAQwGg++ywxY+ceZeXa92OhzJwWBIG+8yjSZCImRyulxpDoczzdHlTDNZLDl6k3mWw9k93eF0zQj6/Yl2i6HNrNf3uFzd0212+3aXx5fkdPtSDXq9I9FuK0uyJ5Q5OzvLNCJYn2S3tycl2tusFnO7QW+YVMMZCQkWV1pKSpvRaPTGuywMxkQGExIGY4TQarXB1JTkdmzxLstw4ff79b29vQk9PT1yCwaDGd3d3TPa2tpmdnR0zLBardkWi8UYCgYNZm3Q6GiudyUnJ28vSE5+d35hdpnb7a5JTEzspHNTS0tL39l/yaJz6fwQbSlmsznP4XCUdHa2T/d3O45MSUlJFD6319nh9dZUdPq8Xm9rWlrattTU1DKTyVRGxzfRvVx0Txd99lDH7ol3/TAYjD0PJiQMxiREKBTSBAIBHYgHPol82IlozGptbZ3R0tJSQuSiSKPRZHg8nkwiFZl0XMBms5XZ7fYy+lzrcrneIiLRQMShgT4bExISumLdp6KiYiYRsxCdU5+dnd0W6xi6t629vT2Htmy6ZzZdr4j2lRD52d/pdE7X6XRWIiItRE5aaH8r7avLzMwsTU9PL6NylhPxadDr9X46LoANRHBsa4/BYMQDTEgYjEkAeDuowy9oamqaTqSjkEjINOr4i4lYFBPhmEZ/m61WazURiwp8dnZ2riMCUZaXl1dJpKMqKSmpdazKRmSjm+6zHdsgZbcTSZpGWzF9L8zNzZ1GpOOIhoaGS8rLy/N9Pl8iyBKVu4oITSU9XyURFpS7lkhLFQjTWJWdwWDsOTAhYTAmADwej7mjoyObtkzF01Cg1+tLuru7ix0Ox+xQKJRMnXaryWRqS0xMbKutra0jIvBVVlbWM9R5b6P9zYh70el0ftXbEO9nUkHldBYWFq7Hhr/h3SESYlC8O3p69iR6ZnhUQLam07PsTb8fW11dnVJaWprqdDrNimenij630bGlycnJzURUmuHhoX3tHGjKYIx/MCFhMMYBqPM1UMea1NXVlUIkI4k60Mze3t5ZnZ2dM2grNhqNOQkJCX7aT310wEcdcxd1uGWpqan/LSgoeJg68Vqr1dpNx3Tj84ADDpiwK9OBPCBAVA0SJULRlZ6eXkNfP8bfGIJyuVx2xL7AM0TkJJVIC+ppGpGU6fT70VqtVkdETVdeXq5vbm72g6xg+IfqawsdW0N11EnX7UxKSmpHffEwEIMRfzAhYTD2ANCJer1ek9vtNtNmoY40mT4RRIqYjlnUMebp9XobERM7OlkEiFIHui0xMbGMtnep062gv1vpuDb6bCHi4Yz3M8UL8O4goBZbxO5v1C/wrBCxS6Mttb29PZPqKtNsNk+jv6eVlpZeSnWZS+d6DQaDk+q8u6mpyWEymWqysrI2EWGpICJUTX930zm9tLnpOC8TFgZj7MGEhMEYZWDIgciGFQGkZJ2XUIdXQp3k9LS0tDwiG3lk2RcGg0ETdZRl2EpKSrbTsWstFkttRkZGdUpKSj3IR7yfY6ICQ1JELJqwTZs2bcDqfAphyaQ6zyMyWGS32/Oys7NBWM4gwlKCd0QkxEHvppIISQORmgb6u4wIyzZ6PxX0HiuIsPTG49kYjMkMJiQMxggAb0dbW1sBbbnUsRWQ1V5EhKLE5XJN7+7unkm/p1JnVm+1Wmsw3FBbW1tNJGRZQUFBKXV+26kTbJoMVrcSj9Lr8XjsNpst5iyb8QaFsNRjmzVr1vLo3+HN6uzszGtsbJxNhGU6kctpRET26ejoOL2+vj6P/s4FUaF3W07PXEkE
ppTOqSKiUkdbPW01PHWZwRg+mJAwGDEAK5o6pUza0h0ORxp1ujm0byZ9n0GW9HSNRoOYDrj8XUQ0uhsaGlqoIypNSUn5vKioaAtZ1LVw96NjImvavd9++03KpFmI96CNqsZvjHdZRgsYEqJ3WY1N3UcE04jAYnXYjb4XIWcLbfhcQCTzUCKmdiIxNiKlNpKJZiKdFUlJSWUkC6VUP3WJiYntJB8tmNHEhIXBGAgmJIwpCSU3B2I1EmmzUyeTEgqFiqlTmYPEYNTx5GZlZRlpn5Y2He3zEQHZRp3JdupUPtRqtcjZ0UkWchdtjqkc0zEVEBlkq6Bm+vTpX6h/QJaIqCYTGUnEJ8lHfk9Pj5wZRKT2ECIjNtofqKurC5BsBZxOZxPJEYJsSzFDiGSsGcG12EienIhbicNjMhhxBRMSxqRE5NRRzGChDgPEYxqmjTY1Nc0hQlKUkZGRQpZuBnUQafSbHjED1HFsLykpWedwON6mvxvT09MRz9HCU0cZOwMyzWKjr3WxfsfMKSK1WcqWSaSjSKfTYRrzmeXl5TPou5nkrR3TtonMtDc3N2Oor0zdkDQOQ020+cbbtG0GY7TAhIQxKUDkw4jgUbJIC0nJIwsp0p9PIyICS7WIiEcaKfwqJNdCHEdNTc0aOq2GvlcgyRYRkebJENPBGJ+AFw0bydu2WL+DsLS3txe2tLRAfgtJVqcbjcbDGxoaflBaWlpMBFuPhHbYiPhUNzY2IiEcEsRVELGuJMLcwoSZMdHBhIQxIQBvR2dnZzZmRsDKJGWdR7tnOp3OmS6Xa5rX6y0gJd5A+1vJgmwjRV5DyvqrtLS0ytmzZ29PSkqqVNOOw8JcsmQJkw/GuIFCWPolh8OworqRjCcjfokIy4za2lrM2ppL+w+vqKjI3LJlSybJv5bkv5yICQJsK4iIYzZQQ2pqagPJPpLEtcT7GRmMXYEJCWNcAEMrsBKRO4IUb2pPT08GkQuMv89EcjCPx5Obnp4eIkLhJmLhIcLRS5bidlK4H+Tm5iJ3RCVtXVicTd3i/UwMxkgBb4cyROPH31artScjI6O+pKTkc/ytxkApyeGstCuV2g48hCVE2KfT8d+hv03UdhCEa6TPIBZHJHJSYTabK4PB4HYE2RKBcSAWCls8n5fBAJiQMPYISAFqkRAMMxWgQOkTicGK29vbp5PVh5iOAiIcViImCfTdQvuhhGvIsttCSvhtOgfDLa2w9Eh5dkCBsot6XIDfQRwAb58SUK0uelhGW98UZqTcx+wwIiJpTqczuaCgIMPn882ifdOJtCzBWkfUhnoxZZsO762vr0fm33IMAymrMG/H7DAkhwO5x3dub4yxBhMSxqgBpAOuZnwSsTAhA2lra+s0UoDTaN8cUp45REIykccBmUrpbyz0VoVpsk1NTW8ajcbGnJyc+szMzCqy3lo5pmP8Q1mBt5feqS3eZWHsANYsUpPDxfodhIXaZr4SZJtNbRB5dGa4XK5jKioqruju7i5JSEhoxhpIRErqCDXJycnItVKZm5u7kdpnrZrBFkSF2ypjNMCEhDEiYIiFFFluY2Mj0p/ndXV15ZGimqOsLluEbKQgG6TkkH+hjqy0Kjrn26ysrKr09PTq7OzsmMF9jIkFpUPyUwc3afKQTAWAsFBbrMQW63cMCSHHCgLEm5ubZxA5wfIGS6mdf5fa/DSPx5MJskJtvILa9zYyKKro7zpcLyMjAwG39Tx1mTFcMCFhxASCSIlEpHZ2dmaQYsrp7e3NstvtRfT3HIxVI6aDFBQSf7Xp9fouIhsOUlzbSRl9kp+fXwHvB/3mQu4GTFUkZcWJoCYJ4AVDrE8wGEQcQzrytND7T8F0VljL9O478c7jXU7GyAHPV2ZmZjm2efPmfaxOo8dsNmr7Jq/Xa6EtT1keYRYZIrNJHxxI35Pr6uqSSXfY6RpNycnJW0lvlLvd7koiKPVYgRnT6Dk5HCMWmJBMUWBYBSumYkNyMEyLpX3FWFmWCMgc
6mAKU1NTdUgMhk6GFI3PYrHU0L6tJSUl7yNVNv2NBc46kBSMV0ydOkDHVFlZeTNZxceRfBjgDSsrK3uI9vVSJ9OwdOnSsxHnE+9yMkYPkSswY0VpZXdtYWHht/iirN9kQWA6Vq0mmUgkAlLQ3t4+E9PxHQ7Hd7Kzs62kb0Rtba2gv+GFaSCCUkoEBYG224jAtCImBvqEti46n0ntFAMTkkkMWDKwaJD2msiGmSzY2cgcSUpiGimD2aQIkui3RCIfyaRArGTVIp9BJSmZTWQBv47cBlhdNiMjo44URQcnY2IA6JRMJtOHJDNXIggZ+zBUR51WMDc393UmI1MPICzq7DbSFw3K7mWRxyjJ4bAcQwbpmgzSLYUkN5jKfBIRlOuIhGiJmHQSEenq7e3tJPnC2kBltJWTriqFN1YlRfCusD6afGBCEkdgXQwszEZKvHR3r6XEdBTU19fPwdgvfS/Ky8ubQY26gCwXucosYjqwIBjGeUk5rCYlUoYgUiwKhuRK7OFgDBWzZs36D8na50Ruj1f3IQ9Gfn7+P+NYLMY4RkRyuO2xfscQMZLCkf5CMsMCrMBMOurgxsbGi8rKyqbDW4uZd1gVm8hIFR2HeBUkNixPT0+vICI8Kos7EmHKoXu08zDzngcTkjgA7s3KysoFFRUVf+3q6mo67bTTLtjVOSAcZEVkYnXZ5ubmIiIYOdQYEdMxHyvM0t+51IicGFYhK6ORLJAm+m0FEY5nERmfmppaTpaHh6PiGaMByE9hYeEfidjuh5WNsY+s1pcH62wYjF0ByzNgKy4uXoO/oSfVWXv4JFlLJQIMjwqWf5hFem4uyd6RpaWl2evXr8+m40CKy4j0yI2IDPQeksPVk66sQWzTrsqg6OaboFeJXP+6pKTkm7F+bsYOMCHZw0BOgI0bN15ElsBN8FxQI1lGjcpMBMKLQEHaUuiYFPoth/ZNIxIyraOjAw2vKDk52UcdAVyVPURMusiKqKXG9hE1mgfp2GpqoF1kQbiQOwB5A5h0MMYSREi+qqure486iAuxDhDJ4T/iXSbG5IGyknSf8ZSSkoIFCZtmzJjxFf6GkYa4FeQ1UnIcpdF35FkpaWhomEHn7U8k2Uw61Lp9+3Yr6VFvYmJiGVZgpq0U+pP0byuGpWlrJ93pBPkh4oN0BUeSLn6Nzv0LyfXj9DsPQ+4BMCHZQwDzrqioWFRbW3sLCfsJJPgm7CeiMeOjjz56GLk5iHBYaL+BNiM1CvyO5csryOp8H0FftL8VCcHQgPDJY6iMeALj+NOmTXuAFPdx9P3J3NzcyniXiTF1gCy2WBkZm7KrkraV6u+InXO5XEnI/IyN9GamwWBA9ueSxsbGc4nQZKalpfnou5eIj5f0MlZbRhbbveh0DZ2fU1lZeSftP2T27Nm35efnr1Yz5zLGBjslJMpKqXrMSUeHuqcKNdngDwQMm7dsObWtpeUOeD76/eb321IzM+0Wf+B/Ab+/JjUtvZGsgMbUlOR65HcY7JpkCVjGvuTjA7CSlARcgdGMvIdcY8YIrCJso3XdqQRS8tvMCbbH582b+xTJtjne5ZmIgAcAG4I1x8KrCd0dqctH+/rjGaQvetPT0+uwKbv+p/7m8/th+GW2trXltXd0ZJusCTmki+fUVlZkq8dgSrvT6Txlw4YN+9TU1v11yV6L/7bHH2KSQPV2qUsixMr8G5OQQGi/3FJ26LO9ST9c3+ndu6HXn9cTCCWMfZEnJ4wiKGaFTGJR0CZm0ztID3mEyu48QWG6pcF0xjpN0hlyRydtFehzeS0sFQl6bXe2WVc/26bf+D1L58OHzZ/50e5aKusraxc/1+C97Kte4xFNbn9uhzeYOlrlnXooEuKbnpuE4OWDRoJMs64xw6Rr+o7W8d/vl6Q+UpKXM2pxOG6Px/zamm3nvBPKOHurwzu/zu0vDITElCIlu4Zd2YQoqOkRv4z6tU1jFJu9tpzNLZa7N/y36W6fYNt8JDBoNb4ck662JNG4
5ZRQ40tn7D3nZYvZ3Bt5zABC0tTSkvXHtQ2/e7XXfnFPwMUkZBTgFVqxQZMoNunskpzkhdxiQcgh5oa6RDZ9Tw15Bcv44HD5g7ay7uCssm7frP/pzCec3dPw7M9m2W/NT0uuGe61YMU/8s2ma/7mSr2x00dVL7xc84y4otkdyMa2SVgWvbW299wfdzT+6ZK5mQ/pdNrdGpLdVl45+5eVwQe/7bYd6g32cCbdISBLeISflHGDxiLKNAlirSZZVGustFdLmptVxe7AFwwZqnv9xdi+0CYd9cbXDRf8fk7iz+fmpG1Qj+lHSHw+v+H2tY23v+Ky/SAQCjGLHmVAoN1knEDQsX0kMkVaCDPLWNCHCk8gZH6+2Xdpl6898cH9Ld83m0zuoZ4Lz9+zq7Zf8qeulD94g0FW0IxxhRApgnqvyL+tovfOJO+21jP3mvXiSIdwGlvbsq/f5n7kG7fxMF7/cOhoFiZxn26WaNGYhJO6R665sYE3GDJ+6hDH/HSb+4mnje3fzUoLr7nUR0gwzvh+Zfupb/TazmcysmcAgXdqOK54uAiGhPYDh+a7r5e2nXfuvJynhroK6cbGjsV/7066wRv0MxlhjFv0BkLWP7Yl3HZgu+OL/PSU6uGe7w8E9P+sdv/ft27joWNRvskMeEMYewYg4Ks7PPs9UR649oakpN/p9Tp/X2+IscbnW4OXojHEs5AMxlAAT8lz7eKy0zyeF81m85C8JG/Udp1V0yOmjXHRGIzdRpXLN/2tbXWnX52ect9wz61pbC58qyN4Vohdr4wJgLccmrPOa2h8cnpBXtkOQuJ2mzc4PHvFsVwMxrCwrtOzt9fnMw6VkCxvchwoRNJYF4vBGBWsqGk+gD6GT0iamgsbe5Nyx6BIDMaoo6bHP62hpSunHyHBlLAWTyArngVjMIYDePOGMx29weXNE1NmsjRjosPl9Y1oUkGXqyex22+3j3Z5GIyxAOJJMMyI7/0CGDA2H58ijV+gt0s36USqUSfavQFBpG1I56UZcY5WtHmD8ryxgJYKl2fRCyN9qev1C3cgJCw6jShKMIgef1BU98SeGWui43Gehs7HMb7gyEO3hnK/8YIQy/cA6DWQBZ0w0XskSwUkb5fn0KEiV5G7WjrHsxvyszMk6LUii9qej15cjSJbGfR3plkn7+vwxY73RFtNN2lFtz8k6nt3TyYz6X4ZdD/cv2uQ+403yJTrPFwzAMkGrZQfd3CHPO0Kico5PdQuGnZTlgYDXlQWyZiN5L3JHRBO0qUGaluFVr38rcLlE7GaJdpuLrVds05Lz+MbUtsdDLhfkTUcxFs5yP32BEYlohIdo10f1vV4cbvTwQ0VeFF4gVCOUIjRLwP78TuOc+1GmaAU71iYJo7Otoq/busU99G2K1jp5rcuSBPHZFvEbRvbxTNVzr7fUC5cE7WFhuGOKDfqMUEXfqbo32KhyGoQLxyYTUQyJC5Z1iy2Or3irHybuJXKC0V85peNotE9sBEdmmERf907g4TfLy7+pknU7qKhoQ7NVCg0AJQrsi7PLbCLW+h+NSTEZ33ZIJqGSNgmEtDx4p2iO3JSp7Qn2irq2qYP9ynoWP2h/ncFqQQZRG27SIGNtMnNTTSIx/bNktf63reNYl2nd5fnqHKHMv1gWZPY7tyRqw5yYqayhWS5g/0Um/pMsX6LhQsK7eIns5LFsna3uJTug7Zx9+J0cUJOgni8zCH+sKl9ABlCG/rNvBRxSp5NvFLjFL9ct+v11nRULpRb6gr/jveLdvqnvdLFsdT2Hy3tErdvboc1t8vrTVTg+fHMEDvo07EimtH3tNA9jfSFxLxf/asAKYilL4cLyNKF0+zi0+ZeccXy5l0eD3n94fQkcdn0RPF6XXc/WZL9D5ULsz98Srkjf7PSMxk0A3+LBTzfI/tkiWKbXvxsdav4b1OP2DfVJB6ndgmj8aKvG8XKjoHr/M2xG8Qj+2bK8y/+tkmsjnFMJFAu6DJskOPI97t/qpn0
QKb8fh7db21nfNYVHBVCsjTFLF46KFsqyRvXtopnIzrgsUIKWUGv0D0XJpvER/QCLycB64548YdnWMWT+2dJRfPjFc3ildruEd0HjTOPmGOCQm6GArDNAjon2ThwslJxgkE8R8ocn4+UOsQtG9v6FPN02vc8/TaNPn+yqkW8UL3zekzQh1l0o3sHCQAhxPXcOyFhacYw628dInnIMuvFA0RgZlED+OW6VvFuw44EWD0B6ligKEBUJqmuvmluivi/mcnC4Q2K/f9bLTq8Y28pH5hGbergHKnwbqI6/0dFV99vUCggvJeQoizv9kkFAqtmJIAyyzaHLcChAkQf8t3gHig/p+baxANLM0QP9S4XftMovmzdkffo7AKbuI/kqNrlF+d+3SDKugcvM9oaygXvhE5peAHqqsIdVvhzsLeQT2XDcw21vR6dZRG/m58mZRl1qbYLEH08h0qgJql49wF659kDssVMaud/2+4Qt21qG3NLGZ65vxDpO50Mqa1dXikzkZ5WeHI/OzJfdv4Pl3aK321oH/G9ckmPJRt0Q3aTahQPdJpJJ0lrJNAGnqe62p/a6ectveJ8khuVrNoN4d/2o99AnG9ev3NSbFb6C9WoBzxU8bieRvkeC3bZdvUDjJXBgDKrhvKjZV3i/u2dO+4XDPWVf7D77QnsNiFBh391SVJfh30VfX+lpnvM2XUSvQwoHeDQTIu0+t9tcMm/UfEoh0XRZBCo8YIKUsbL2zySkBxG5c4q0/e5lfcllgplCtfw+42uEV3/38Tk32/sIUWye9ZEJNBgoKRSY9Tjv4jovdMwuvcbT5hjN4qzCuxStpOJyF1anCj+sq1zxB6JoQKuWMVBIn5A9/yA3qkqJ/MSjeIM6twByLjqSRkP+LilR3boGOY8OTehj5CgnMflhCfwfdPmHtHwHuTr52tapaWK70NVxLtCOhkOJSTf25z9vUPwEvxkdYu4nowszyjeb7ziwiK7mGEzyO+n5yeI56u7qE5GbaWGmIBuwXAFMJPa2nmFdvHHLeF17EC8f0R63K7o+bQYBl68AIKKNgmdPYfa48IkY58XA7p9cYpJys9b9SPT46voWgf+N5z3cbT0KvgO3m+mOTxcH4kV7W55v9Ao3m8k2G1CAqv+cOpY4f6HNVFiM4qD0s3i4+YdlhHIw6l5NsmCEVfhJ20O6+oN6jxfow4Nz48xaSjew4hYgOTgZVaT1fdYuUMsbx/oPgIZwdiZfAiq3XMLbeIzYqoQlDPoPmCuKiIJCTwrFxXZSFna5H1gWT5T2SU+ovKikwGHWZhkEjeSVYwygVgVWAZW0xISODRgeIdgO33c1CueouvsamwSHfc7JKSnUYOHcMwnYUZHA/n4DtUbnuUzUuoYH08kCfrprGRxsFIneDY8I6wX5yBuwMPp2JvmpcpOAW5uCBfG1C+eZhffzQt3YpoYtt7lZG3jHaFewZTRIODBKY+wvKEg/rAwneomKOvt6hXN9K4tsq7w/i9b1izrC50POqPv0/sE60cMzYtVTvF6nUv+DvL663kpYj7Vc5KibBro/H+Wd4n/NPWMeWc/VOCdnJKXIDJMWrHR4ZVWDIYLnqNnUb1SOKaE3uOF0xLFIVT34Aaw3iuJeN63rUNsdYYTTX+HfjuflO08Ulz4u43ezwa65l/omFgeFygNFdPp+seQFf9UpVOS7WtJJtR6g4WZoFhWuO5ckqcrSYnvlWySHei31Pk/TO9RJQDwYl5AcnsOkSwrFdYTDAojXSPSQ4LhqaOyrCQziTJeA+/vX2RkvEzbroY+O+lZ/kfv8Bx61oNJntGJtNH5sE5B7nD2B0S2cR3U220L06SVBwXZTDLwRHm4Lfpj3AfDMb8kWTuSyvZcVZd4nI4FR5hN1/3FnBQxg0gFZDfb3L/jgjfwJvp9EbVZyHAnlQdevqepvUYO9SIW6o3v5Mp6e5Z+e7Kii+Q0VRoOzyh/o1go749KEun56H3T9TY4PLIsa6jN4Gr7USeF
9lRMz4f7oc3DBY7hXsjFeATe0ynUZnsDQVFKMou6OjY7gQhJZ98xkB3U/fdJl+RYwvEGaPcYAvkn1U2PomvOJLKMa0FG8Z6h314lPf9G3cDOGfWTZAi/L7xf9BFP0LXQPpZSGSBHfWWM0OOxdMwL1C7fUHQMAP2KYZpF1BZQDniGowHycD61hyPpHcMLgnf5ELUXeGt2BtzhQyIk185Mls+M9g1CgjZ4EPU9qCvo0NJun3yuK6cniZOovHZFvy5HuyxzDErM0TbuXZIh+6MrSc+iX8E1z6X6OI/6OrR5tN1wv7BDho+l9/PDGUnSswi5gxcS7T96yAfHoJ+E+J/1ZT21Eb24Z690WdbLlzfROwvIZzkmG+87URrKkA3o8JeqnVJnoTy4DvrsDKXNod/6N71rtK2R8JrdIiToPM8h4YOSfJc6WYw/37YoTZxNyg6KEAKKY36/II2sTJt8EQgKTSNhxlALdOqb9IBQIA8uzZQKDB0pOl4Iyt4kkNu6vTEJCe6JCsGx6ADQMYIIfdXqFldSJaFiEfiGisxTrE0tlQXxIN+lTgYvEZ06GtgBJEA/Xd0q3m9wiZlEqB7dN1MUWg2iyYOAT9HniVGBcj26T5bIpusiCAk//2hmktgn1STH8nYFjIlvJoFfTHWARgXyBqK2X5pFWmLvN4Q7ZQz9oANLMYa9T8UJJrqHWQaQ/o2ELBbQOKH4GwxhkoNroFHCY4R6Qv0mGgZaGnhuKBkIMdyUKBsCCn+4ov9YK66HRgBFgk4ESgcdAoaPUOd436j/n81OkYq9nQQXHTnGKEEGobjT6XnQ2eF9qDEus+1WsSDRJBtfpJs/nsB7R0wO+MJjpDxATqB4IGuvKUOAePYHl2ZIpYdnwbFQhHhPX9FzwMJEo75/7wx5vUZq6FAyOG8+vVt4tDq8A+U7VbEGEayMzgKK9zVqK9+he8MA6FACpa1EyjMVRb2ACB6GS/D+0QngXV1anETyahbf/7ZRtj3E/NyxKF0q6Ga0RW1/dzTKdgUpz59RB45joOhxXbw/yBbe386ANg6FBPKL94629Q61q72ozaAtNlK5vm4Lz9JGkCG8PRjuQ1uem2iVhApxTWsGGcOGbOIYPDNKDcKE9jqLnrmVyoq2kRJlSaPzQjuCrELmZtL7WUJ10ukLiJerdwzlopWD4OmC4bgS/Mun+8ECTlfqGOQGw5cgn2hL6PzOyLdLS/kqkl10QtBjeH78BplINujJ8DBJY+3srxrGnScR9XhCjlUOTW8ivXTX5nbx3IE54iQi3y9T5wM5QdtGLMUNJBeoQxARxEng/YGgQY97SRf+CTE+uVZpnIKIQgbQNvDEgxESyBXqBMdjCPpMMtaeJfJ9Ockh3ll1j08SWjWQGrpR1TEIeO6I0jEgtdBRD1OfgntDhrtILtJM/bu7aQl6OYQIAon3hNeCThrG5iWkxyt3YVzCo/Y1tfETSIdDj/+ddDLkGAQW+Iz0Op4J9Yv2nq3oV7QD6Fd4XH9C/U4sQA4xRA45xDOj/s+kst21OE3AFsU7SaW2C0IcCdSRSswT9TrZdmH0Hvtpfb/j0M5RR7gW5Bzf4aHCfoNyv+OJkEJv4f64H97z7+abpH5BaAbIFQgSvOfN9O5xT+g+EEnE2oGEDlfSd4uQQAiOJaHFi8QwDQIaMdxwaIZZdm5gZwtIEYAAQOB+s75VvEAK4HYiBVfM2JEPAkFjIBOIJr5qebNklQia2y9t8MVD7Xq8KEFKIShepEbzsznJMggulTpbBAdtIuvzSyInGE4Kd54aqcxRFgjwNauapev4V2QBQfCvJ+FGLMpp9NIxlrqKSAM6RxAXxMdAsGWFacJuxAISZjTW32xok/d8+oAsWd6jMq1k4e18kTEowreJwOGaGG6aTS8UjBiuS3QaaocMtokXj3qFYKKxg9jBK2PSDtIxRLniZtCzoLFAMG5e2yZepQ7wQrhFiQ1HAuOJd5AigsUI6+7X81P7yJAKCNyNa1v6
xZBEA40InSfq/PpVLdJliUb+570yJJtGwJZqZUOh/4AaPojjqwfniLl0P1gq44WQ4F3CyoXli3eKTukI2ncyKVw8F54Drm2QaxDMi6nTp1cr44BQdwAsGQzzgGAgyPJX69pkB/fcAdmD3hfKIFWp9/foPlBe6NBh/YAgWYiEPFXRKcn0dJtWqK/oCnpv6CC+aOkVl5GVg6GIv++TKQk0XOFQ8t8j6xbXh9UE78xBRIIRa6UCHs8f0HVgA96wtlUOg55IbfxBUkzwYL45BBc02tV2UtaQb3hj3mt0ieOojUMmQL7V2TGbunzi5M/rJakH0XjzkFxpqYFcDEZIogGlifpEQPePqL1upmu+RrJ0QPoO3QFvFtoy2hE6jGf2z5a/751slp4fFdA/p33R0BdDotMMuJ30IqLD2ULvGx5CBIQ/RB0fSOf/lSTLelexknTIGV82yPb3V7J2YbDA0Nq8C+t7TwOdEYxI6LY3qBP5nPRmC3XQsxPDQw/wBICYwcMHEnLHpnZp3Z+eF44JUoGO/ER6Vug36C2QlIfo97MLB5+BbJYePo2M33myrEtcPzeF9HiiqOz2i4OpH4HRCIJx45xUaVTinUBXQhZhCP58VavUpeis/6ToGHjoQFpRdpCZH69sEctIJv+xX5YkDyrOJiIJTzq8FSCTIMbogI9GO6P6uGdrx07rDVoMRgJ0ONodiCj6FpAA9Ikg4kFlutOf6Vq/2dgmvPQD+qSb5qZKD3GyYWgRLSBmF9Mzox94pLxT/GlLhyRgT+zfP1MH9BLabAe1MXg5HyIDBToMhK7Fu4NggTxFTtAASYpECvVp8M6DdDxAx91L5QfpfpzqEIYZRjfWO8JyDD2I2MJPqG3fS/0K+lDIAcoxnNg0YLcIyUnEqtHhgX3CIoTnGBYKyIgUEKq0fUgooEDXk4KJxZAhYHBx4aX9lwR/vcMjj98VEOQEBQchQhwDFN9RpBT2pkYPNy6Gemz6sFWDQCZEPM9UGCeEFEoTnS88O1DWM4jEoBMAOUAVQnHWkLLBsElkZD2UPwQO5QU7PDeiseG+M+168ckuFurFO4JlBmIDEnIECTSuCVaM4Zw6JVYA1t/NRJgwTGCO0I7qjJeYiHr/ORadJBUOYurvNIRd7u5A/yEC1AkaCd5ZpFtUzj7QDm+mbI4MHNNKBYNYFtTxfxrDcQV4HpDY8qhgRlgRUPAgJAn68TEzF3V20bTwu4UVdiYpKHQmqN4DqRPHkMiqDrf8jvcOrwCCNXPM+gHXwRAgrGUM9XQOYeooLBaVCDa5gzK4GXL9WyKJIEW1JL8Icj2QCLaB3hE8XhAPdOSQLZAnKEYE4YKcgBhgjBtTYWEFw1MGWcC7cQeDmCbad28MD+H9oQ0+vE9mv3Lh/cVye0cDSuh5elZ0CjA00L73pQ3eTBgPPiVYb780k7htQZq0zFRDDyVJGGJMDPyGsDwBdDjo6FG7cGVHAu/t9wtSpXfLGGFR4j6aYcyODd8vHGMBwrSN5BjPgjYLr9Uc6sBjlR3lwhCETa+XM4XGG9CxoWOGjMLCRqcOIgCL/uKiRElIYPlCdqDfMTQd7eXRKHoccojhTbT5oUAdcoQh+xkZIkd2hD2Qd5DxBb39SFmnHPbEsDjkHO8PZUQHDS/MBxE65sYIHaPKBfodeK0gEZFlxlsCYQnLoVmsOq6wX7lKSI8PhSt8Sm0Nxu8+dA0QdzV+Su5XiCfK/OOZyWSYJfSb6ABPBAjyUGaAykByS7j+31E6evR9kWFNEG2EAkCXw8OvAsHZkMuWYfBgEBGQFPR9qFvc7wt6P9Dd0Cdo2yohUYH2jaFJEBLEBoU9r3uIkMANegxZJ6rbHnEEkcC40uNECjxBIaPi5XTAqLFqoRTXo+gPuO60g3W0UcgiSwrHwnVdRZYNLG9Y5nArwcL+X1OvjMmQ16XyQejBTkOy7FpZYRBkm5zmppHjYx5lKhRKAKE2xCgLiq/Gb0AQt0d0rrj6Fmo8kUJi
jGVmiXDMxDv1PXIa2iXTwwG4eBY1mBVnwWuDl4v572hUUKzzkwYuw4I+fJDbyCEnlBl1i7gEZ/TMBjoPw27wEkGgIHwIktw/bUcGMQy9DJXo+pRjpaLRhRWNVbdjqtlwGXO8AKINIgHAMsCmAm7hQ+WYsTscCa+0gViii8dFnegjpsbvClAe6rh6h88vPiVSgam18ATA5fs8deoYU24n5Yd7ggSF21FIKqUkfXg4Q6fVSPkGnP6QHE6C8kP5odTXxpji65JTjMPHYfaAM2J82kNtBN6NZKVsYYs1tuDBSr2E2uMsKvNtC9PlUAeUlTqdEB6ROxelSwIETwK8JBjeiRU4bdLGvgfamxovgOdBu3ZFkW2c+YeFadKKrSDZhxJdRG2o2LZDYeMdDk0qd9zPrkyPhemgTktF3U2QVCV9QKd4PHWkqGLooEjPNQCvDt6VqkfUdt0RY46TWjdo73hnQxmaglxg2AE6FW7/f9e6JCHB0A30OmZsJiuxKCAIGL7Ed1waOmWHjtH2de49EVOWMUMQ7cnrHdjvqHll0JZAMCN5AWIuIv82DiKDMDAwfLuU9AO8AhhyR3uHJ1GNg0KsIWbEwUAAgYKRu38M7z+8+IZB7qPqEbNOJ70dVNsDjoG3E0Nq6O8Q0wipRptSjdmgco2hAHoGdRmpt9RUAyA4g8Uw7i5GTEgw7ga3NKwtuOcqlGAtuPbgJobVMy/RJF2peEmFCXpibskyoA1DIipQSRsdePk2cUSWRVrp6KzTdzEzBpWjEWH2hyqG5YWGhQDEfyhBUer0JVVxfkuKD0IIJQjG+gkpXMR+WPQIJKXfSKF8SfswrINrQXnBuom02tE5f0nHwqOB7++RpYlANSh+6Xauc8nvakT+cVkJMuirNMYURwTmYbiqQHGXvUfMd43SSWgUtz2eEUFmj5Y5pOs3kpD4guGGhUYKdh4rnwiCUtGwwWpvJSvx76VdMnhPBa6P+6AdwFPxNNUdLCEMC6nAO8aYIJ7vlFybaCEGiUa+vG1gxnYM01Uo9/vVvBQZCIkgN4yXojNCEJdx/EwKiQkUD8F1iVTm5SQzd2/u6At+/B4pl3Npw/j6X7d1iI0kH4dkWmTMwAolqC05wosAkgerAh0mSB+UNKwLKB7/IAobkq+KHNoHPGaoR3hIYC2+rozNqgrfpIyrg7igAzmPyrey0y2V/XFySDUs1xgOBAk5PkcvriOyK4PJzXrFCAhfC+PiUNDIcYDjcV+8f8goAt3w/hIStfJotNETySipo33RCgrHwWt047xUKbPo9BEMp9YjOjZV0YEEo54xph9JSLyKBQjX9iHUSX0TJW+4EoyPK2YkSsWLtgxDJJL4RQ5/wVr+O7UjeCYjCQmSF0L5QnedQvW1gd4pvEZroggbnhAeJ8Q/wdtyUVGiDO7G8AHq8Ku23gmXpwQd/5Gkd1Hq2ze298X3oM7uWpwu48jgwcX+FiIMiE/4OXV6GK7GMIWKUCjsGQkoQcaIN4FOzY+w1GNB1eM4H5ocbn4MPaL/QEcPfQLDQE440IaJQWmEjrmZdAzCBTCUnQQdQ20QkyH8yjRW6Omfk6zjWimm/gYB3iWGHyAjnzf3yPeO0uC5cDxE2qf0LzBI0Och/i8a8AR/b5q3b5gW+hpxgDgvrF/DBkKzxy+Df6eTnO+bZuo7H5wfzwcjGfK3MsY9YAggz8hJuXrxk9nJknDB+NYp+VsAuzJqgH4B/SG8SngWkBjAGwzH9AEYeoaRi+NXxbgfPKwr2j3iZKqfa2Ylyyn3SEWAdoOYnDW7yHkyUoyYkGDMFJYBGvm/61x9bqd12nDAE9x9IBi3b2qX0c+YhYDgSswQUEma2nYR/HI4VdAhmWY5JimrLOqYaKgWqToejcbwk1XNkjzAxYjTWhXXC8Y9EQQMFxrGwn5KL/THVA5UNMoNQnQndToQYozBge2eriQY0yhlUL0nAWU4CAmlDiAl+fd9svqOgTLH8BEUOBQlrnMY1cFp
7Qniz1s7BzwDZlmgoZ+aF+40XlOUv/rcYNkHpptlh3eoEigFoqNadHWK52QpdUKXk1L+GLErUZ09vCuYRncXWaNHEjmCV0u9Tq+SswQN6ioii1BOzx4Yjm3AftwHhA9WAMbafz4nWZILbIj7OO/rhgHPhDLdtalD/HlJuhyHRZQ8roVgRow9I3isMMaspfEEBEwiaBFy+h4pFngKVDHEENupJBuYJgplB/ILRYUYD4xRyzFj5R3gO4jcn6n+QUJwTcQfBJTZXD2B2PKNztqquPYh3zgGnTkCVTHcWKUE28E9jFupihbKDgFsaJtP758tywwXL4ZP4HlDPNSd9A4wdInjnj1Afdc7ZBvBa/dQeUF+YByoQ5KQF+yHVwjEFwppCckdyMAnLT1SDiOBe2M6OMa9QcZAyj6NmHmHuA54UTB9GcOS6jmQOb9CtBEYDyWeadLLGLGLvhkYMP45tTPEGGB4DUGO6HzU66jPBP0EI+i79Dx4d/jdF5EYah21fwQgHk5t9e690uXvb9V1i/9bOXDsFUO5T1D7Bwn5w6KwfsB10PYeL9t5wO94BGQX7v1Weh/o2BuUIHMYcJhpkpdllfIEHY24kd+RUQOLH5vKp8NkgsghEbJ/1ThlnA1iJG6cKwbo+mgkKskt4dlCx4rO7vo1rSLLohNft7rleTAi8T7t1CbsBo3Y4iQdQ/r6z/SuEPtyboSOQfI6yBbk+MHtneJSMgJgCGAYSn3vKil+Vwm2Rn6cuxZnSGKCXzDb61OSaRiaaPunK+0dhHfZtwM7b+g06GoQEpQXMS1tStA5rocYKnhIZtiMMjYSQBFgMOP4Dl+AyHavONVCurXQJj5ocg3I0YN+Ac+Mtoug0mcO2HEdh88v+y4QFvSDiOV5RElyFu67gn25qaBHQDDRr7yaniPb9dlfNvSbqQPAkELiQQyjI0nbywflhMtKz3X/tk7Zl0YHj48GRtwzoDP9AzFqTJOKHAOD4Px2Q5sUcgg3fvo9HYeGPFtxOSMmAhZNuyec3AjCc8WKJml1wuqCQlenGrV5YruGPiRlhqj67d1hKyasFPoHQ0JxIlMqgqzUqZWPUKOCkMHCMSkp1zHco7rv0Pn+bE2LzMyHTkTNToqO/Ssl2BLnfO/bJvlSIRxgmWhQaMBqXdy3zSEZJqzDT5pjB2mizH+kBgTPAToOTHmNBCxMxFaoZcWV0WDX0fG4H26FREKoN3RisBzwHHfSNSHAPoW4wAJd11knvT6wHrEPHiQIb6Pyjo77tE4cRQpZdcd3KZZ9Pf2O4x8hJbyG3jUCLNH9wQIOTw0LR+U7lfsB/yPlfCJdDwFkcKk6qP5RBnWqLAKu7qDGhVgWNR0zZpvA27A+ThkCIwEXMlzF8GK8ERUpjk7y9yTfILlQtrCQMYMFni40ULhHIbvpRFxQxzj3KzrnTGr0mCaK3xHX8z06Bm3FGcPHj2ESeMTQFhBTBcBb8VpUcr93SdEhzkkNkoRsXL2yWZIeyCWUDdrn5y07lOhmIs0nflYnA/fgzkaHimmw8KSpUx0xRg2ijIRh6vRjXBvtRiVZ55PcwWOHYc3BEpxh/01rW6W1CwVWFTFrAfKJdga5mEZWF2QK1mi4nYXLCw/muV81ymD0KiJiUOCoA3guQYyCipKFvkHnAtlUhwYh198q1v5D1DEhxgRxOCgvdBB0xypqn6gjELurqN7wPPA2Yt9K5TcYGKtJJuHBwfv20H/QZ9A/uB/0QynpIHhmVO/IF1R+Pylz1WOJoTVMJYUBt6usyHsakKvblbK2RixxgTp6gOoNEwPUpHDw6ELWQGKEJiQWESFHZ42271Y6VwRCg8TDC436A6mFJ2OwJIx4l9DReF/dSsbhb6MsdsgeAjAx/F2ryBDI7EmfhXVaLB2DDhgd+AdUFsQ3ot1BvjAstFy5PnQ9kg7Cm7C3EuuIdwi5begNXwcGKuoGzzxYMDLu9Y8Kh9Tn0IFvRcVKQs8eT/rwyExr34xNlBf6
GjM58czXK/UGL+G3bR5Z/3/e0tnnuQYw2nDyZ/Uya3i22nbpOhXdYR3gV/qDY6ht5yhGH0IRoKe30Lkhpd6gi+DtgH5DHSAQHKMICOIN64OwToIXCnV8jKIr0A9+QrpdzcyMNoxkdXgmtW6gN/A+4ZUfSS6yEROSFwfJIooiRAav4gExq6ZLWesELkC49FBYsFAwO1jmyC8AdzQqCAF4sKrgaYHiiQUE2Hyxi9kYIEQPRGSjUwHluGknke6oR5CIwYgEAMGFxffpIMegMcKFjm1nQAzK9hhlBEJDKCsISGRGV1jU22NMCUZD/WfF4BYcrO9nKgfPDIuGBhfnF1HPg0ay1TmwfMgz8+QgU0RR5oeinhljqx8MMRBurAHCCUIQC1DgT0bUI2T1KrK+EDsExbwwySyJBNzGG5T3BqWMeKbNdAyCfZemJMiOE8SyJkYHhYa+qyy9QCwZhVzCQ/jhTuoS5PyVmp1nLkYn8PJOjsE7fH4XGZnRBuB9e3+QsqgzEXYGKLrIjmCw62E4J3pIRwXaEVztsdztKgZ7nncHKR9Ik0qcooEOb3nEvUDAkZdhPALG2H+bYr8fkJEvlWeEHod3G3oZ7wOGzV7KzENMfUXHiK4WuSxqqQOEPsFwIGbEwNgaTA+io9wSQ39EAjrtyRi6Cx3tYDpGBcjk6p0YOdD18O5Fe/j6XYN+21VadrSpaJ0WCQyDPrUTGQChi24L8MRHQ/VWDwbU1b92kpUcbSEylisSsXQenmswPQBPU3RW9l3V5a4w5r5zWJuY+js3yRReeyMUVuovljulmwsVBGsTY3QYhzcogUlgWBhe2dI1tpkCGYzdASL/4cE6vcAmrWUQbQzdweoEIVZzZZxMsn2+PhyrA8LxMXUCd27q2CPrPjEYuwvEyCCoHvF1iJGDDd3lDQd0PloeToIF3R3OrWSQHglY+TCEMGS/M4LMYKgYc0ICD8g1q1rkdFKzLhx8B0JSqkyZAyrpOxKKIfgH8WjYX9cbnj3D6poxnoHo/Iu+aZTT+dSFHmtc/n6ueUzxxpCBXOwRsSP+kPQqDbZaLYMx3hCUQwgdctgmQRmuhNcbQ8pqTAZ0/aXLmmVArEkxPjFEgiGQyZ52nzE6GHNCAjGUwxI7WUgLY1M7G5ZgMMYr4O2Ino8fDYwBt3kn3yrIjKkD6HGQ7J3FwOCYWMO3DMZQMb6nO+wBqPPLdzZdDxHniMSG2xGR1BhvjjUGp6bSR8BbyxBX0mUwxhKwZne1/DkCEOfajTI2A2tCIdFXLO8NYr2Qhh5j3TzUxBhPgBo3KbmldnYMgs8RlInwAcR6DRYniIkX8NCzobxnMSUJCSgIIr9/MM0uupXocARkIUAnFsOHKx65CzDVEZH4W7t8sQmJFus9JMlphKNFSNDIsHYGst7uzMvEYKiAfGOmz14pRpl3ADKEAMBnq7piEg3kKkAabRDpH5cky2DNWMeBuIBw/weL441SWRclG6kdJopfrW/baWfCYMQCpnNfWmwXJl04OzRmUcFwjBVEi6xOSH//QlW3nMGJgNvBCAnS4v+vyTWqhOTmuSlircMrCT8jNqYkIUH+CCyyh+l7mMUDPYjsglDBYNFFVoMMRERmveiEZmqaD5AUKGjEvcAC3RqxTDcSLCGPBKYzq3EwYOSYRgxvC6YwYmwVHQVSMtuUTIQIhkTyMUwfxRQ4LMyGmANMRcTcckyvwmwOTH/EfHT8jQyXuAfKjanWSIaF5yjr9sZcRZYxuQG5xIJ6yE2D6Xd1ykrSSJoEOYVHEFPRobgxw0eddq3mTlHlG+1hjpLArUHmPwkfF17wMTy7AvKqTpfPofaCJFiQY8zCQCwN2gamMeIcTPuErCL1ul4SJL+ceYByoL0hqB3tBddErA0WwEPmylL6GzMLLMriX4hDg/yvd3gGzW3BmBpALqr7l2TIGUyYnguZg05VF5zDjKB8ZcV2xLpAz0cvFaBm5c6W2WjDOtil5ORAkj5M98UMovJu
v4yDgSxjvS54HpuUdgGdXkRyDj0NMxS6F/lVMPUWnndMp0f2WOjrBdSmWhU5B+mP7mv8yuKmOBePgXi0RiX1wlTAlCQkiBSHJyRyGqCa0hzJq7CSJHKAnFcQzlCInCTRgBBOI2HC3Pir6Xi4sdEoIESYdQGljERwmG2BXBVI2gRhRmIrLN+NOfIQ0N/MT5VTMHvkkvUamUUVncb/lSTJpZ7VaYcgKWmmoNCT0J5RiEWhTOIzsgKQ1fCRsi65mNldi9JkEih0Eg29sdM7MyY3kEfmnEKb+N36tr41kSDZkAmTsqZGmkkrvXzIpomEgLGAnAzIyphEihWy+LsN7XI/VuZekmySnQEW+kMOGihnJKmCZYpOAHkL/rq9U5xG7QyeSHheQkqCK1i0OPdH8ppt8ppoF3Cfo+NA+ZECoEVZ9RseGST2Qyp/tCdMQYQlvCH2rGzGFAKyjcq1Vki+1JwXagJLDC/+cXG6HIbEGjDIu/NQaeeAa4CIgwBg0gWWg0BOHuRIwv795SrAAXFlTpKcTQRdDM8jpvqjX7hqepL467ZOOZvuTjJwX6FjkGAMuhceGBByLKGC/C7I9gwyBMMAsl7lCi/2+sOovublGqf4BfUV1b0+aeSCqDQOPlt90mFKEhKw123OgU5nkImLiuxSQJCZsIrYKdboUdeXiYRPWTcEQmuh62GRNRAStAvkaEHiOMSUYNE/zBuHsJ/yeb1sQL+bnybHKJGrANdBOn2wcrB1nCOvqdPK5DVoBGhqIB8gOSBCWCPkJ6ubZTAl7gc35N1b2qVl+T+OX5nSwLAhZCfW8B6sLriij/usVs70gWI8g/4udQ10S6PTh6KGxw0keaaypDm8GphpgbwJzx+YI6d5IqkU5P2fZKmibf3r4FzxHLUBJKzC+jGqixpeFyhq3BfEpMRmlJkfEfD7vrJgGDoOkJqbiVBh1sYj+2TJJIogM7AosbzCSBIuMSYfYNDBQxE91Ac9igzRazq80nuClWuRaTRW7iw1CzeyaoMsILcKCInM0kx6GX0BEtvB64i8VxeTMXnpsibpzUAysfNJvyP/iFXJEN4Z4THcR7kmPPIgJEhTiWH/dxTD9dYFA/uaN+u7ZTbalXWeXeawmoyYkoQECnBRsmnAfowxolNXx89hocFVnGIYmCIXa8dcThbmpcubiVxYxcERi9GpgGsOaXdxzUBox+JTUMJw8a1o73/8KbkJcoHAy5c3iZNzbWLflIELMMElD7e1muoXMQJw+VnH4SqijD0PiC7kFrl/ojMXq4vUuZVFJpFJFqmzK3r6kxd46G5fmC49LPDw5S3N7MuAqgItxOkLSIUL7x0syfDiXShDUHpSYl0THkJcs0C5ZjTQVlA+NWi2i+4BC3ejj4MLGf0B+UUcSALpw84I2YR3I4XkEpmB1QXlsEXLJIDzH9g7U5m6rxX3LM4YcAymN2OJJRB9mZVb0b24f65Z128ldgCJP++na17wdYPIMOvFnST30RhOXzOVMCUJCdxvf1mSIb5LBADuPq+yUBEybSLNLzwTSJELpY60vGp6dTVHCtbqAjsPp0wOilhT7CGkSC2PVSOhhJEkCDEkGI+HtakmhYsElDvScoPxq0vCh5Q1ZdSUw7g/0nwvpY4EHQYWU0OOiy7OacEQ4VTST5Q5xFUlSTJOCani4XWDMoa8dslssqbwomXJRplpGNPuoVPBDyBFIC6QxeZOvyTSseQb3pMkIiK4DtTxGfk2uSor9iN7NyzISJIOaxIyjDiRSIM23J52rASO5QrQMSAJF8qL1PUvVneTBTrOV2Rk7HFg/aCLi8Pr6mBCAjp3ePQgXnJhONLvWLsF3jgMiav5ULAEixoPguFGeOJACJIMA406SN0+ZBgihgTZi+EhRDxfR3NAxj1hmQBX1DowIBY+ec2QSI/4CbFPScpCj+gPsFRGdF+DVPJTGf0ICRYNxUhEvAqzpwDBvG51i5zKC9LgV5QuUvs+WeGQ43q3LUyTivOB
0o6+lUox7IJhGYzR/6O8S84Q+P2CNCmASH8M0oJhFQypwBvSSxd+FQvmEWm5e0uH7CS8dFGkMsYQTJFVL12OKrmHRYvxyVvpmnBvb+7yyHvDFX5eoV0uivRspVOuIYTFmuBmRENCWmU0MKw9wAmIBodWKyb9WBZk6dW6btm5w50MhYqHric5Qzr6325oFZeS7LiVzh/DhUlklR1M7QBDL3BrH5BmIjnrkooeJNqvrAECmQZh+eW8VDk0g2NAPHAdBGdjkTwoDwwfglhj7FylESDZkGMMiWLqPK4lg1jp3NWdbnHj3FTZdtDO0A6RphzJuBC/hVlraBfIgcHSvWtoNJqQJqzSJjWLg4z/dFWrOKMgQcYd+RQ9/h+5vIBLTLPpxa9IJlEJiNkDGcfw+XcyzOIp0pmYXQZ+gDYAmYa8QRYBBKYemWWVK02DuCCtOnTx7ze2yTgSGW/iw4KEThlEizagEm0Ml37Y5JL3lsM0yjVfq3OJHxCBQp+BWJEnyx3iiqi+Bu0XfUL3LqbqT1b0ERK9Xu9PM+paWjyBrHgWaE8AcgNFOlhA360b2wfs+3tEnn91zYxfrmsbcNyPVzXHzG2Chck+H7AOjE/8ev2Oa2DmwU0xrokG9p+o1Ms3rm2Nce+Bq5NOZpBV3QvlO9TjsyzGxsqQmDWWZRoPCCprxMRaJwaW44r2/nLSE/D3yWHk+iyxxrDh2oarOVphIrgvGpHr8eAlxWoDABYRi8SbpLjfjFqgDJ7GlWO05Pl4hcWgH1G+dbvV6rQ6tC6XP2gb7TKNNyCg9KHtsSOc/xJjhXXE5KkTBW5QdOjy9oFyBQMSatys1fRNeAAQK/WLGLr32tU72hRms92+aWDfglQR163uf69Yfc3vY+ybzKD+0qvX6aRrqI+QGI1G7yy7cXOLp3fSE5Kxxs6SrDFGD3Psho0GvX7IKTEWpNvWfNsiDh3LMk0FTFXrbU9jUU7ampGcl52W2pDeLpqnAiEZS0CN93BunDFHtllXl5GaLBldHyGxWiw9Z6W0Pb28XXMQdajG+BWPwdg1wKpPt3ueM5lMQzabT8uzvfRml/fcqeAFZExsZJl1DSfOyH5jJOfOKMgtPbqx5u0nXOLa0S4XgzHaONLqea8ot6gS3/sICVzfZ87IfOHj9obj324PnTUVYkkYExMYH/9Osv6j781Nf0Kr1Q7ZXF+an7Hshw3b772r2XQHGT5TO5ydMW5h0Gp8Py8w3laSk7FtJOfD/f2jIvM9K92Bg9Z0evYZ7fIxGKOFmXbD5mvnpN5tMhqlYdkvqNVsNrl/M9t+o25jm//9bsNpvYGQNT7FZDBiw6jVeA5P1n/462L9DTarZWBigZ0AcVKXLCp82LmhKfmZVs2VHb5Q6liVk8EYCdJNuubz00JPnleS+g+dTjfiIOz87KyaP/mbr7xxfddD6/3mpb5gyDCa5WQwdgd6jca/wK5f+/vC0E/z0pJq+vZHH1iUmVZ5f5Lt0vfLm1//R4vmR6va3fv7g4KFeTcQ0mh0GhEKihBPEhgp9Frhm59kWnthiuexM+bkvmg1m0YU8JeYkND16/2n33zo1qr/PtNpvPLjpp5je/whHmsfIUIa/NPqtMHQ1J6vuJuwG7SOI7MsH1yc7Pn7ATOLvhxOsHYs4PzFBVmrXkyynPBCpfPSV1sDF2zq9C4erfJORYS04VEDTZBTYO8OZicaN56aEnjpohlJj6cnJ/aLsI+ZhwTj8qfOLXjl1LnilT1TxMmNp556amVOTs5fjj322GfjXRZGGIfOLvroUCE+inc5Jjq2b98+b+3a1RvPOussNlrGIZITEzuvXpR479VC3Bvvskx0PPvss4/rdDrH+eef//N4l2WyYkomRmMwGAwGgzG+wISEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAY
DEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhGSM4Ha7LYFAQIfver1eSzC7XC4b/jaZTB7a54tvCRmMkSEYDGp7e3ut+O73++VnT0+PLRQKCY1GE7JYLD34jG8pGYzdh9frNfl8PgO+GwwGvU6nM6h6nP72GY1GT3xLOLnAhGQMQIpZs2nTph+Skj4ef9tstum075rVq1efScTEM2PGjNuysrJWxrucDMZIQGQ7YePGjX8nZZ1GhEQq51WrVr2CT7PZXLtkyZIfkeJmws2Y8CgrKzuuvb39h6S/daTHF5D+9pMen0mEO1BYWPh4fn7+G0y+Rw9MSMYAENCEhIRvqqurbyeFnYB9DodjEbaMjIz/kQVZHu8yMhgjhclk6nE6nTWdnZ3nk6LWYl9jY+PxUNKzZs26ickIY7IgOzv7661bt/6d9Hiuus/lck0jHV4zZ86cFUxGRhdMSMYIRUVFa6uqql7q6uq6hP7UYB8Jr58E/N7ExMSOOBePwRgxiHAEFixY8Levv/76e6So89T9JNfrcnJyXohn2RiM0URKSkpLWlra/c3NzX8g8i37SxDv3Nzcv9P+2niXb7KBCckYwWw2u2fOnHnfmjVrzgkEAtKtnZmZ+d/8/PzP4102BmN3kZWVVUPK+snW1tab4c6Gkk5PT3+Utrp4l43BGE2UlJS80NnZeaHH41mIv+12+yYi3i/Gu1yTEUxIxhDFxcXrKisrX21ra7uYrEp3RkbGfQkJCc54l4vBGA1Mnz79aafTeZbb7Z6blJS0urCw8PV4l4nBGG0Q+ahOTU19srGx8R7EBxIR/ycR8sp4l2syggnJGCM3N/dBl8t1nNVqXT9r1qz/xLs8DMZooaCgoLSqquppUtR3Etl+iJR2U7zLxGCMBUpKSl7u6en5PhES44wZM57n2JGxAROSMcbs2bNXtLW1/Z0U9pdarTYY7/IwGKMJItlPGQyGRQsXLnyGlTRjsiIrK6s+KSnpHpPJZE5JSWHiPUaISUiQZ2BzZc28Nzo0565udu5X6/IU9viCCXu6cJMFJluq2Vffc3VwxQqefTACJBi03XkJppp5KdZ1Z6aHnp1fXLQBgZW7c83qxqaid2u7Tv283X90nctT0OH2p41WeacSNDqt1pSQanG/tKoi3mWZqMiyGhoyrcaGI2yBD04syX49JyO9YbSu7fP5DV9uKT3kPy7Tdze2uRbXdLunBYJCN1rXn0rQWxKMIhTU+Det/G28yzIRoddqfPk2U/WcFOuG4629rx88b9bnRqPB2++Y6JOc3d32p1eVXv5Eb8pPa3v9hUJY8CbYl7I7QNepN3Id7gZKPWLup42hY//dqbvgivaK+743N/uxZLtt2LOV/H6//p1VG797T1fy77e7dHMDIZ1OaExSzBkjBGi2JTU13sWYqGgIiQLhEuKjHs1Jr3Y7vnddkef2o0py3ttdj2pza2vmHze23PKWy3JuhzdI78dK1pF1tIo9NYH5kha2zUeKGp+Y/nWzOPzfhoQLT3OUP3/dwuw7clKT6tXf+3WR8Iw8uGz79Q91J9/kDQaMe764DMbO0egO5N7ZoLu9UbTl3LrUcv1wPCUISHt/7ZaTftKU+HRPIMBahTGu4A+F9CtcmgOv3u577h+h6tO+M6vo05EOgzm6nEk3rah/4G237RzS7KNdVAZjt9DpC6Y81WH4Uf12d+Hf5+sutNtscrJHP0LydU3roY/1pFznDQaZjDDGLaC4n2sKXnFYWeP/jp2V9/ZQzytv7ii5uzPpjp6Aj8kIY9yiyxdM+m298a//yu4+Oi3J3jrc80G8X67q+v67HttZY1E+BmM0QExb858G1ynPGt2XX7VXwl9BvvsIidvtMT/eFLqm2x+0x7OQDMZQ0BMIJTzWor32sCLPh1gbaCjn/LvWed42Z2DuWJeNwdhdbHR4F7+9re707+8757HhnlvX3Jr/bEvo8mBI
aMeibAzGaOKFDt2lJzU0vVaYm121g5B43Oa1nZ6l8SwYgzEcrOn07Ovxek1DJSTLGjsPCgm7ZqzLxWCMBr6pbjp4JISkqrGpqKHXlj8WZWIwRhtVLt/0htbOnH6EBMF+Te5A7s5OZDDGExy+YDLc00M9vtrpmSbM7ABkTAx09LhHFCjscLqSu/zWpNEuD4MxFugNhKxuj9eM732EBIrdFwwZ4lesqQm9RiN0WiGo7kWQsziMKfws33sUYIoGbZgvQr5ZvPcMAsGAjodr9ix0mrCs+4Myxi3exZmw4ImocYSFpPi+vTPEiTkJ4s9bOsSD2x0szIxJg7mJRvHMAdnCSF3j979pEqs6hzSyxmBMKICIXDszSVw/J0X8q6Zb3LS2TbgCPLNpJGBCshP8bn6qOKdwcBd/TY9fnPVlg+j2j0z44B1J1GsFjMgkg1ZoYEwyH2HsAeRa9OLhpZlihj220whi+HRFl/jz1o4Re+5AuM20kWgLq54NdkZ8cE6BTfxuweB5DwMk4Cd+Xi9qSZ+PBFDbSQYdfWqEneRcivpupW2cumBCshO0eQKi0hVOrppu0olpCQbpel7v8JKSDonG3oD8HCmcRGSuW9MqMunauI+Px2wYewh+krW6Xr8cLoRCLbEbRTIxh0a3XypmSGKHd/e06tpOrzjjiwZJuEu7OUkxIz5w+oJ9ejyB2MIcknXI9+Yur+gNBEUgFG4PI4WXzr13W4d4ra6b+gQ/YttGqeRTD0xIdoIHSx1yA84rtIt7l6SLFiIpZ35Rj2mnfcdlEKE4Iccq9k4xS4EHsfig0SXeqHOJ47Ks4phsq0gy6uT+NZ0e8WpNt2gjZQ9FfWa+TSxKNoq36Ni3612yUziX7gV3d9iqDIkql188VdnV11EwGLuLZpLjq1c2y+/w1D21f5aU03/XdotbN7YLVbwho/OTjHJYscRmoL81opNk9+Eyh2h2B8SFRXaxkOTXqNWSIg6IDxt7xP+aeuXQY7pJK64qSaLrC/HHLR1SfhfQtU7Ns4k8q17eF23iy9Ze8Topc5efpZsx+niPZBIbsDjZJN46JFf4SD5/tKJJbHHuIMo20rdHZFrE4bQlGnSIqxQrOtziyXKnbAPwtGSa9dQ2QpLgPFvllDINQn9YhkWcnGsTK9vd4vFyh/SWnFsI3W4SKcZwpv5mj18O6azp8LAeHwRMSEYBF09LFL+YkyKU+D0pbGuV8fJT8hL6DfucSUK9hIT0mlXN8rj9U82yI9jm9Ip3GgQpbJP47YI0qcQjcSgJ/GXLm+QwEYOxp4AhxT8tThf7pJr79sEC/BsR9TQi4j8oThTTbTuGfc4psIv/I6IDcg0FfzzJtkmnkQRGRwQEbeViOidSvE/PTxDziYDfvL5tDz4Zg9Ef30k3i0f3zZIBqipckpk7xQEk/1fM6D9x6chMqzhTGbKH1+VU0vUa0upPVGikkfp70uN2Q/+hylNyE8T3v20Sqzs4nioWmJCMIjY5vOLoT+pIEWukGw/4y7ZOcQtZnFDiV5FA/3JeitgvzSxSiTW37cQlXt7tE+d+3SA7hJcOypFM+5B0i3i+2rmnHofBUBDW0LeRHINYpBq1oskdkPEhlyxrEhVkLZqIjT97QLaU7UOIPL/X0LPTKz5P1uX1a1rFtbOSxM9mp4hT823i/u2dWBpgTzwQgzEoINsnfFYnh+wh4/D2vUkE+71Gl6jvDYhjiWQ/tDRDegbzLHqx1ekd9FoY9rxUthG/eGb/bLGAzjkjzyYNVh6hHwgmJKMIyBeEt9MXljR4TL5DJOKcQptkzFadVlqJIM0Y2tkZIQEQqL2diEmp0yf2T9fJ4RwGI16AAsUQS5NCGlIMOnFBoV0cnmWRhARxVqAuaqD2zoBJwGgrn7e4xZUzgtIjiIDARo4GZIwDQPf2BkJygyhjqPHyGYmiOMEgjFoEa2ulN2RXOhk9AZws9b1+sabDLUmMnMCwR55i4oEJyRjiqEyruH1RmhTKamLI
vSIoQqHhr/wd4KnAjLhioPxBof56fqo4q8AmOr1B0eD2Cw+RFdswr4yg8JAQrKAZ4xYYRv/jXulyZloFGYiIH4ROjh5W3xUCrMZ3CSYkYwQ5c8FmEAaNRpS7fNK1XWDRicf2y4p30RiMYWKg5oUHZJYSO/J+g0v8en2buHluirgsapydwZjoyLWEvdMYhv/xyhYi3kHxBOnxQit3n6MNrtExAsjwV21u0UtCPIMU9xdH7VhaYqR5SxiM+GCgaQdr79OWXrEw2STOL7LLjcGYjNjc5ZNT5GfZjeLdw3asrrI7KR8YscGEZIhA4N4r1d0yODV6diKCWV+uccqAp0ggcOmUz+rE6fnhGBKgk87H/HeMKUKev2jtFR2+AFb3lH83eQLiFboWAqp6lDnyHzf3iho6fquTczkwRh9BgViOXtHuDchp6ZHiDavwv029oqzbJ7ZEBe/9aUuHWEfHH5ZpkePqkNUmt198RPKKWBPkf3i9ziVjpjCsg8GZFR0eYdY5xfL28CyDNtr/eq1LJgXs4vwNjDEGZPzV2m4ZvxSdL6SO9Df0OOS2NyLTanWPT5z3VaOcxotcVACGbTD1d5PSJjaQ/n6p2ilnz4Co9JLR+RrdB20CqSKAlR1uYanWiGXtbp72OwiYkAwR37a55RYLiL7GFgtIorbe0T7odR8pc/T7G9N/f7Kqpd8+zD5gMMYKCFZ9OEoOVUDx3ru1I+ZviBnB7ANssQBy/ct1rf32QWm/FDFTDEr9pqhjGIyxAtImXLe6JeZv6x2eAbpXRS0ZhPds7Rz0uu80uOSmooNIzS/WRsl+TbfcGIODCQmDwWAwGIy4gwkJg8FgMBiMuIMJCYPBYDAYjLhjyhMSvSY8pdG/k4hpTHE8MM0iA/jyrXo5rl7lGhhgqlOPc3g4QI8xLmAm4XXvIiVkllkn8i16sZrk++B0i1jZ7pEB1dFA4jMkhlrV4eacCoxxBWhxg3ZHhuzBjtkrxSQnFCCRH9K6YzJBLCyh41rcARk7wthzmLKEBAr4vEKbXPQO8JCGxWJJlTGIBkjL7QvTxFUrmuVaHNudPvFkxcAgQDQIrF/wk9XNMup6NIDZC7+cmyL+UdElqnkdG8YQoC72hQyqyAkpV9slmcXsAmeMKedYp+bsfLv44YomceeidHHRN43UDgYeh2yVP5yeJC5f3tRvccndwSy7QS62d/+2Thkky2AMB2mkv7EWEogyRBIJy95v6BHftg+cgIAs2dfOTBYvVXeLXKtOzLIZBw2ovnpGkvhfU8+oBqFeMT1RzpT8rKV31K452TAlCUmRVS/u2StDPFftFG9vd8gpilhbBl4SKHOrXkMMOrxqr5ozRE2FrS68hA+kfwdhwHmRuUUsdBCu56YWolqaOM8uUwZrRA8dC+WLa2IBMhAeTBVDZ2HR9b8mjlmYZBI5RKDwu9MXzhKIa+E8XMdF+8Pl1krmj+mVWDl1Z9YCY3ICcoCVeb9fnChu3dgmiTH25Zj1fTIXTu2ukbLpVlJj98m3ch3Ia6Iir246TiUgWmW/SQf5DMk2AmDND8guVkjtInnEbsgx2hKugnuD9NsNGnlNTKtEWm6ki19E8g3vC7yKkHk9nWdT0mCq99ApbQWdCuQfx7J0T23A+3fPXuliVYdH3Lm5Q8oOZMSsKGl8YrmOoCKTgE7xiEcmb7fqwqngIVGY8hsWdY2UX+hxr6JjsTtSZ6vtIlr3QmZJzMPXVPQ6rol8VHCco02irJDr6L4G97DI8oTbidpGpwqmJCE5q8AuWj0BudS6inZlXRmsOvqrealy2GUmMehnq7rEl60D2TZWMAUzhyLFejV/L3XInCIQWHhRMPccK0Bi6he+XzzNLodzkKcBnpQ7NnWITLNO3LYwVaxo90hhxLx1LNyEVVSxkN6D2ztlcjUIPFaSrOkxyWMWkwL/Hl0PS2cjG+wdm9rlcvIP7J0htnT5ZAP4oNEltnHekimHZFKg/zczSdy7tbPPSwd1Vk9y
ByV6Hsn+sTlWuZQBFsm7ZUPsKemZJr24iGQMGSoxpPNrZSXeAqtBLhIJuYfHDjIKef7LkgzpCocC/7qtVy6ed26BTRyVZRVrqS3BQ4McJ+cV2kUK3Rd5edRrZtP1Lyyyi9Jun/iwsUeWH0Ojvf6QzNtz37ZOMdtuEDdTu/ya2gPICDyGPibcUxqHZ1qlHD5DOlo1CPHZ7RdSxu4lo7OWZBTHQDe/EsPbAe5yJMkokp4tTjaSru8Vj5Z1ybw4J+cmSEMQwzfw4H1Dsgeyfw7JNbzkxaR7b9nQJknJfST/mDYMQxCZi+dSP1JC19yLrgmPyOPlXZJgYOHJFJNWvFXnkqTj5rn9+xrknLplfpogLiPqqOxfK+kmpoqkT0lCkkkCiqRl0QCZuJKULVYqhXAcmGYWP56ZLJa1DzwW1h4y+M2xC7lCKYgEhB468gkSPgjZFdOTxA+InLxe3y2XZT/rqwbJtG9dkCaFHQIM9vtQaacUZBAPXHO2PZzD4ZjsBElIIIzPVDplQjWU8edzUqQyX9HuFheQIr+kOEn8ZVuHdF8+Vt7al4iHMfVAelgkGnQy3ikaORaduKokSZz2RYNw+ALihjmp4mxSrrHGyUHQQWih2KGQ5xMJhmeipscn8zHA44HVS7EK9V7J4XH5329slwTmuQOyxQfUhpKpHEgUdZ+SRwcKuO+ayWYxj5Q2yAXuD2IDxX4oKey96LfLljcKiPHD+2TKdoj7NVM7e4CHdhgKYNBh8dHuqEyV0KMXFCZKwowVqtOJADxNsvrfpoErUMP5sIX0Krx+VqdGnJprk4QEIYXIrwNifQzpduhY5JT62awUce3qZrGJzrmEdDv2P17ukAvmPUxGKQi0RikEvNil3RpxCl0ThATek/eIrPyLjEro8b8SiYnua74g41eWl/T9p1NwaGdKEhKw6GJlHY5IwI0H7wQWCoOIt3uDUlDTjQMXxEPw309mJYs/b+0QGWY9Mdwd18NwC3Qm2PnCJKNcFRWCrwa6QrHOTjSQBdu/0wB7hlDeQ9fMJmZeZB1YRli5cHPDw4MyNvQGxAk5eulGZDAgZ55AUGaUbI4ipnBfQ8ZBRnBcAxEBBPk1Rh2H0ZKfkmynkNw/X+0UJxAxhhsZij+ojNPDjd1F14H1iUXHkKUYHgt14bH8qHU+ILe4JoaLXiBL9aScBDnEEx38DQ8LSLtL6WQ6iRjBC7ixa3RishiTB9DjuURKMHQT6S2DdwPrz1SR/oUuhkxCyhA3GA3su3/vDDLowkYhPHoq/MHwwo8w8BINGqljMcTS5gkPSYKEg0BbdJp+1ywg2YfHBMS9V5Jsq4gG2iH6laH0NVMJU5KQIKPeXYvSxd6kjGFJQjlD2LzSAvTLQLuPmoVUqiAPEEgwZrBaHAv5m5agl4odMxKmk/IvSehPHqCAZ5MFWNrtlx1DeBxfJzP44bqx3HAIzIICxjVBcAotBnlfKHiTdsdsIFwPxyKlMZT11i4vr4/DkECK9jfqXGS5JUpZbiSFp457uwLh2KU8UsJNJNcg5fBggFxAuqDIIZPwZEB23yULcdMgsgXlCU8MlDKs1L2TTVIxpxkxbq4RVS6/ODhtx/EY4pxtN4q36Jrw9LmUawaV9qRXGhfKC8sShgG8h/gcLBMsY2rjk+ZeOXwIEgHPA2Qb8SAQZAz/YfjdLGWSDEISNxAUxHTgEHxC5OBlgW5dRvoYOjV6siXaBfQ7+gEQcBCHItL96BNmkjxjWDQ6wBvDnUblmhgKUq8JkTcp5CWwk75mKmNKEhIo4d9uaBOXTU8UPcSKIcgJZK09Wu6Q7reb5qbI2QYppBiR2h3BSxhbPD7HKr5o6RXnFNqke+4E+vtPe6XLsXgIFwQPihZDNWDGsB5vXtcqhRizcu5enC5delin5j+NPZLUQEhVgcXY+3HZFrnUdTpds5KUuo9+BIG6hqzL7U6veLbSKf60pV1cQQ0R5YG35M7N7ZIo+RRGzxgEmslfPSCsGAKE
ov71/BS5Xgce2kky+Fh5lxzau4NkGx42dPhv1HXLTh/n7UME/SuS8yMyLXIc/MJpdrE01SzjPWQ7oePhufvFnBRSygbqEHrkOk4YhsG5IPmwIB+jNtQhvTA75BGB3O/SNRGXsi9dMxXXpHaFjgPtA7PTMIz6r5pued27F6URURIyYBFDk0tTzDudms+YesCw4jWrWkiP28VhmWbpwbMQEfhPU4+cUbZkYZqUyQSSyUfKOkUHHY94jhNzreKfFV3i8MywdwOrsUPfg6S0KbGE0OPfzbNJYl5iM8rlE6DT/0i69+qSJEkccO4fNnbIoZhID02pyytna95F+h5o84aHRP9Hcn0p9TkgKVgzB33NjVF9DWQcTsOpal72ERKdThdIMera6KWl7eyEyQCIDgKUvhlkbZrLlzcP2HezEoAHqGsWfB3j/O9+US8FFfeIjI5+mRTty1FBVetImV+6vKnvbyj2C75pEtHAeCI2FRhzjxVoe9qXDTGfZ7KCrJBh+fEzLIbmaiFKxqo84wUQu4dKY69N83a9S26RAEm+ZFlY7iLlKlpegSM/rpUEHCSiD6SMfxJjfZC/RayPg5aAoMJYgYU/jTr3bzHK/jkRJWxTCUa9bmAg0BBgs1i6rTqNiyz3hNEu03jDVjLSbljbFvO3a2OsS/MkEZEnK8LfL/52oMyruG5Ni/SkYCZl5LAivDLYonF6hO6FlzJWH4KA1Y+jzo113FUrB+6bzNBrNH6dTiuZYB8hMRqN3uk2/faV7ZOfkIw1eqfQNK14guR1m0FvGPJUojkpCRtXdoiDxrJMUwH9yAhjzDAnM2XTSM7LTE1pSm3XtvX0+ic9IRlLRMb9McYOmWZdY2piopzu10dIrBZLz4kpna+t7dDs4w+FpuRQDmPigKyXwPFJwddNJuOQrcgT8+z/fqvbew4pmaSxLBuDsbtINGgdx03Pemck507Pzynbr6np89peceFol4vBGG3saw18UZibXYXvfcRDq9UGL56W+OiHTc0nf9utOSQkhGbwSzAY8cV8m2Ht5dOTHsRQ41DPOaQg7eOza7Y/9aTDeg3LN2O8AmT7snzrQwvzUtaM5HyLydR7TXborhXd+oOqe/zFo1w8BmPUAO/IddOtd9isVjmW288TkmS3O+6c23vN9WW+R1a2ew6ITxEZjJ1jts246Q/TQtdkpiQODLjZCcwmk/v6vYtua9nam/t2Q/cZSFw6VmVkMEYCg1bjOy7d+MY1M+13Go2GEc91njutYNPdovlHP90WerLJHcgZzTIyGKOBdJOu+fZCcc2cnLSN6r4BQzPz8zLXvZzUfeyz29suf6FDd2mp0zeHh3AY8QYCn6Yl6Mu+m9D70pULsu5LsSfETjG6C6Ql2VsfWWo977UNznOf7bJcubrTs687ELKMdnkZjOEAQahLUszLL0nqefDEedlv6HW63Vq4Ch7vo6Znv/8fW+s+D1b13vCfTnFKlcs3fbTKy2CMFAVWfeXhNv8H185MvLswPblSo9H0BV3GJBp2m8155WLrfWd1dD7b5XQmsnt79/DO51+/nmFP+Md+ey16I95lmahAigxbgqU7PTW3dTjDNLGA889aNP2FY5zOdzs6AymBYHBqZyPaDdTU1c/c3ND67jFLF86KVCyM4UGn1QbSUuxtdlu2czTrMSczvf53yb4bftzRcberN8hBrruB9z//6g6dRus85jsH3BnvskxkJFjMrtSU5HaTcWD836CeDzDsjLTUFmxjW7zJj+RPP/GkW1ObSqYVlca7LIwwoPSTExM7scW7LBMZIZ/X2NZQJ2YWT9se77IwYgNDPzlZmVMrJ8AYIPmLz7t0OuFgPT524KEYBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIG
g8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHE5IxQigU0mDDd40m/BEMBrXK3yFs8Swfg7E7UGVZlXGWbcZkRKQeJ+CT9fgYggnJGAACvGXLlkvcbvd++DslJaXQbDb/YO3atQfTn4H8/PxHMzIy1sa5mAzGiNDT02MvLS29JRAIJHg8nhTsW7Nmzd+hnA0GQ/ucOXNu0ev13niXk8HYXVRUVBzrcDhOo68a0tkHabVaL+nxv5GOD+Xk5LyRnZ39AZOS0QMTkjEABNTn89WRMP+A2LQJ+5qbm4/Elpqa+k1BQUFzvMvIYIwURDrcLS0tCR0dHT8UYatRlJeX/5DkPlhUVHQ3kxHGZIHJZCqtqqo6y+/3p6v7iKAsIgOzMTMz8wEmI6MLJiRjhOnTp3/V2Nj4fldX16nqPijstLS0R9LT0xviWTYGY3dAhMRH8v0QWYpnRirqhISEUrIYn45n2RiM0UReXl7Z9u3bn25ra/tpKBTSKrtDpMdfpN82x7VwkxBMSMYINpvNSQL71+7u7uNVL0lycvKK4uLiN+JdNgZjd0FyvJ4sx3+1trZeKcJekhDJ93Mk81vjXTYGYzRRVFT0hNPpPN3j8RTjbyLelSTnT7J3ZPTBhGQMMX/+/E9IYf+3paXlJK1W68vIyLg/MTGxI97lYjBGA/n5+X/v6ek5ibYCIuBlM2bM+CcracZkA5HvTdXV1c+RHv81/k5KSnqxoKBgQ7zLNRnBhGSMkZqa+keHw3GAxWLZTgTlpXiXh8EYLRABWU+K+lkiJL/EeHp6enpNvMvEYIwFSkpKnnC73ecEg0HjzJkzH2biPTZgQjLGmDNnztfErF/Ozs7+WKfT+eNdHgZjtAClTIr6yfLy8gPnzp37D1bSjMmKnJyc6tLS0kdsNpuRjMy6eJdnsiImIcG01ea2tsxP67uOXlXXtl9tl6vA5fXZ9nThJgvsGVlpvRs6FvtXfnhFvMsyEZFgNHTnJybULMhKXntUfvJ72Rnpjbvb+XV2dSUvr2094PO6jiPrHK6C9l5P2miVdypBqzfo7RnZmff964tX412WiYpsu7U+y2ZpPDzb/uHSaTnL7Dabc7SuHQgEdBX1DdM/a+o5amNTx6Kqzu7iQDCoG63rTyVYklIS/e1+nW/TR0fGuywTEUadzpuflFA9JyN54+FZ1g+n5+eWkZEeiDxmACGBAH+wbtuJ93Taf7/eIZYIQXrahG3PFXzSwU2bkficMd4FmcDw0FYtxOwOz8Ybsyp+c9ycwrcNer1vuJcB2d5QXrXw1nr9n7/q0BzuC6YYhD6FWOMYlHmqoJs2+4x58S7GhAbV4ePlmuuOa2t646ZZ7t/OzEnfsruX9Pl8hsdXl/3fYx3W62p79YVCZBC7zxiN0k5NBEU4fNvOdThiQGPXC5HbEaq9qnH7n3+wZPojZqPRrf48gJA88cXaq2/rSvmTJ+A179GCMhhDwFand/5VLs2L1wWabv354rzbh3v+N5u2HXRxpelth8+bPAbFYzBGDG8wZHyrQ3P2V2tch/8r4D9qfn72+pFeq7fXbfn1Z5vvebY36aqQ8Gt2fQaDsedQ3+vPv8VtvHfjBseSP89PutJkMsLk7E9I1tU2L/mrK+U3nkCIyQhj3AKK+9Ea788OttV9esCMvC+Gel5dW0f+bc3mPzl8geQxLB6DsVto8wQyflPq+es/krrPTLLbOod7PryAb1c0n/GSJ/nSkAgxGWGMSwRDQvtqTfeFB+ucH527qPgZDMP3ERKv12t8sllzTZs3xP4oxrhHpz+U8kSb/tq9C7zLjEbjkDKDvlvvOmN1V2C/sS4bg7G7+MoROPyjyuZjT19oe3m457Z2OjL+0aL7sTfo54F2xriGPxTSP9Vh+tHhrW0fZmekN/QRkl632/Jtu/vgkJIKmsEYz4CcftXmPtTj9ZqGSkg+res4MhBK4IA+xrgHrMdPyuqOOn3h9GETkvK6hunlLmvJWJSLwRhtbHf55tQ2
deX3IyQIgKrp8RfFs2AMxnDQ4glkqStvDgUVXe4SYUwYyyIxGKOGJmdPzkjOa3d0pXX5zMmjXBwGY0zQ5QsmuXp7pWLuIyQYd/QGQ+ziY0xaePxBM890Ykx2+AMBPVzh8S4HgzFcTBihteg04tfzUsWCJKP489ZO8XlLb1zKQcUQPypJFifnJoj3Glzir9s6d3mOXqMRPyi2i1PzbOLV2m7xz4quMSlbgl4r7lmcLtJNOnHLxjaxweEV+6aaxW0L00S3PyguW9YkHL7ggPNm2gzi9kVpQkP/frG2RVS6Rp6/bf80s7h1QRrdJyAuX9YsuvwD78cYiPmJRnHj3BQMRYlrVrXAaohLOXItetnOSkgmbljbKtZ0enZ5DuTuL3ulixSjluSuXWx0jM1iv4ekW8T/zUwSHd6guHpls2xXV8xIpLaVKJ6tdIqHyxzCFxyYnub70+zinEK7WEvPcvO6thHfH/e7qiRRfG9aomzDj5d3xbwfY+fQkg49NdcmLqb3srzdLe7YHL/VNPYj/Yh25yI99fM1rfC67vKcpSkm8bPZKcIdCIofrWwRnlGUASt1MH8iHZ5N7fA2aktof3vT/f5AOrw3EBI/XN4s2rwDyzgDOpyOMVLlXk/ttrx72BkR+rBPKu6XLusEfUbnHtRFQyIkCCo5u8Auzi+yieVtHhKg9jEu1kDYSekdm50gMsw6SU6igRd509xUsSjZJFZ1uMWft3SInsAOQSmgF3wnvegEvUY8VtYl3iUyMRLoSCkdnmERS0hIvm0bGikyUdkOy7DKzvp/TT39fsui5/nd/DTZEXzQ6BKPkFJV5RsK/lf0TCV2o3ip2ileqN55vqQUg1acRETJSYJk0obrCGRjLnV2ZDGJLJOeiMLAzqI4wSAOy7SKapdPGLS7DiGy0bv4XpFd7EV18CKV6ePmHfUwS7kfGmkGEaOJQEjQ0dxDHSrewYPbO8WncSC7UCjHkHxvd3pFrFcwjd4RyojfouUXh59GZPciUvAgnL9Z3ybqekdGKrPonR2eaRFWesemGO0sFlIVuQNRMEYVHiTi53OgvEPiT9QmV3b0pRyQxgUUH8jXHZvaxRbnzokM2vbhJKcr2sPXgIwvTTHLukEnYaG/YxGEg6kMIOYNQ6wTkLFLiOSY0TlQmRvd4Q4A9aHebx/6fFbrnJCEpMCqJ1nKED3UNn9JBK3BvWcTSGtJYg9MN4uDSI9W9cS+N+T5+2TEQYVfSwS9PuLdQcZ+QTKFdxpL1w8H0FWHUDnWOzxiiOIuzzkqy0oGn0doRjniMsWoEydQW4L+VNvfdJK3+UkmqouQ7C9iEZJp9E6hwxvpXRqGUCbo8AtJh6Mf+1dNt/hvRL9UYjP26zM6Y/QZY4Uhe0jy6YEPSrMIp298NkCLTisFdCmxu4Wk6FDBX7WGFRfe6/lU+cdmW+XfX1CH825DPEu7A+hAiCOJg6iBglS9VtstmhQFOD3BKE7Lt8nO4a4RksC3qePqoHt4iM1v7x4dwUqk8lxA9TmbhDaaYL1Z76IGEyQFERRlrpGz9D0JKBWQK5CCl6rHp9MQRAEdK/p7i1YrPiMZ7lbIXiIRgmtmJosFyUbRTsoKRHCkhGS0sZVIBpQeOvdNXZ4+QoLnOJmsZMj9ynbPkCzTaLhIxmAcgciv6vCMGvkFIbliRpKoIvl9YLuD9oTLBovxD0ScQAZRZucEINuxAF15ABlHTtIL5qH2wnsYc+wGkg2LJNuXETm8c3OH7CABkE90pvAEk+jLjnukhGQi4IPGHuFa0SRIre6StA8VNup0zi+0i3nUV0aPNkC+HbLPCIlto9RnDBWjqn0h3McQc8RDwnJAo422NuHd+A4J2kJSnrDGq11+8SF1aq2kkPYlq2PfNJOwk5R56fy1HV7xRWuv/L4rJNE58CgAOB8WzrI2NwmxEPMSTbIDVZFp6v/Ys0j4jyPr1EovqZEU+TsN4fKogPI8p8AuComUoVGkmwdO1MCzHJSG8psF
2g2YO160fxdFh+X4Pt0PxAMd/DzamtzhOvsOKWt0NnCBb+7ySkv+QLo+FLyFygoX3nJ6xm/JYhysitKIcc+m54Pq/IIImlqXc+xGcUKOVeip3GlG7YCpVbBe0QEm0294l1u7fOJ/zT2yvJGAZVxMCrybHvTxMkff/fDcX9L9VAsSzP67eQkilX6HYn+zziWqFesI9Xswkd25dE/1HaL+P6B6qR0nHSuQTc+AYTfUCaz6t4h81UZZeDlmvTgiyyKtUNT1uk5qA8290qpD25hOdYPvnaRdPqH9W4eoYCB7qvMB7es4ItcY/sOu80ixgIwAkMNk4444X/Q3R9N94V3Am9hIVh3kLfIt5ln08t1A1tA+TTFcNPBQnllgo7ajk0TgvfoeUUEd9q5aJojGl9QOjqbyoo3dTR0LrD+014PTw+mOQK5g9eEeGAqFpwrlxr6dyQBKCetxOskf3gfKA3HD00Mu5yhWHoypSOD54AWCFYjODB3z1ySrq6OGp5JIVn84I1G+K7jOQb5BWnE/uLEre3x97W4/MoTQgeJ6ZU6feI9Ikktp/JDpw8gKL6KyQkeiCcGlDvkZzx4WDBXAe4AyV1LdvkfvInIoEWICPYK6hM5Fu/6oqVd2mulUdyfmhD3aQDPJweskr7GGjGMBdaVK4dmFNvFslVO+X3ioQb5BRoBkum+kVxd/n0D3LUzQS131Mb2zdRFDiDhyL2oLR5E8ouZnJMTuAkukx9Iq20QNtfH3qZNu9+667PDsoD/BcPn7JAPoC3Ad6OxV1B9+RDoUZHBv6uvQ36CM8Ph9Q3p8ME4F+ZlN9Yy7f0X9Ya9y4CxFh0OfpMbQ4WHvj1l6XSBn20kuYaj3Rt3oeKovtJFektdHyx1SR6t9xpd0P3VICm3/FNITqHu867dJftXhfbQ5eL3Q16aawvqnXWm/1YN4wWJh1AgJCvvnvTLEUaSMISB4BDCsl2ucctwWyhlK+tF9MomMmPpcu1BYm6izReGvKkkSJ9MD4xecj0p8aHun+OOWXY8xooOGJwHnQA+gM92PXjxe9LUzk2RHAYsS4905Fp1UeHgvF5Ay//X8VJFG5cd9se+62X5x4deNYiOVCwL50N6ZUqHinFgygxd4717p8hj1uSBor9d1i1+sad1l2dHRQ1hmkhBAIWMIBB3DodTQIQuwAGGNQWFfNztZHJxh6asj1PGv17fKBhsL06jBXTsrWdbLP8oxxi6konhwaYasL1wnlk68kOrlkulJfZ0gzodr72drWvodh8aPrdkdEM9WdsnhH9wPz/8E7ifC47R/o/eebwl3qjJOgpTK9VQ3UMqQHfwOy169H8p0xXSfuPjbJrFtlKyC3QFI9P17Z8jOUn2Gn9Jz/mRVCymesJfoHOqwb56XSuRL3ycrn9O7/azFLRv8bxekyU+1zps9fnHV8mbxVZt7Z7eWSFMUMNzsRrr4pdMTpUKA4r2C3lNY7kNS/jKVY9Hp/57ueTaVS1XaOA4WEMbL0bmg0/nn/tnSAwNPkezQ6TPS4tyf3h/kpcBq6Hv2n8xMEdeRLLy3i6FPHAvZhLKSXlb6hHwX0bVAGHqosb6jXAOKHLFMcCcLpY4uKfaKK6mO0BajgfIelWkVlxFpeIZkD4RBSzsfoPcEgqXqoWhAoWJ4F8RObUdQsHgXH0Z4/NC5XU51CyBmBAQSpPL7ZOz8o8IhOztc4PrZKVJ3JSjtCe9hbUeiuHJFs1TGaG93L06X5VE7DbyHI6h937i2dVxa9z8h2f4xtVHov7BeDImrZnjFZcuaRTkRAzzrdXTMZSSHqh7BU2hFmyQk6HTvWpze157xiGeT0XX58iYyNnbdsUPe8f7haUV7OpdICYbPTs61ikOo3qBfcG10tgk6yEtA6p7H98uiTtEgiUtIeY77tnXKDUW5lN4d2qE67B+t+7D3hzOSxPVzUqQnWKP0E5fTc/7fyhZpGO4Mc+nePyV5KKU6ANEG+YWuQ1/0a+oH4Z36yawUcVhm
fx1+68Y28eQgsYWF1FZwjQAdDTnHs0MOH9k3s58Oj7YjziMj+sqS/jr8DTIEr13V3O+446SxYBUd1Ac/TdcvonZ6Ld0Pcgy5ByGBnnhknyzZh6s6AMdgyA+GEXT3g9RP5lj0/XT4lTN84qJvmsSWXdSbilEjJHiJx2RbRBUxpl9QI0PnA+UMtxA8BbDKflySLJaScgMB+SNZSmDdCN4pI2sBTBIPD4YGpXtpcZL0aiBu5O+ljl3eH0JppppAxw0mDwH+/rREQfIsO3ZYW/+ljuNsKg8UHrwNsHYwFom/7yeB/Q/9fjW9QFhXt5DQXkqN5wjq/I+i54LFfuemdrGZBO2vSzIk+1QBbwxYMEjFrze0UVk0MggJcTfvUqfxeevOOxwQJTDw6bZkOUaOckEZwvpw01uFog03zpCsi9uoHKgvBFYdTwz5eKqjV2u6h/SeoIyvpHcFQX6l2kmKtUt2tr8iUhaJV2td4l2qjzZ67rOlYCfKcdMUQ3/vEMb+UW8Q3u4YLmx0jr8iOYAg435PU+cE4Yeyu2FuipQFdEpQ1ugUfkv1B4v4rkXp0vpFJw+3fDwNyWwzZDlF5NEzPF8ZjuU5hWTkhyQr6NggE3gGPCfq9+36bvHgdkdfBwRFgDgEEEd4VKCU0GkiFgHW5de7ICS4Bkgv8C0dm0r3gIUFpYZYBnT0n7b0SKWATh7WKoD3BjICsgi5hBa5ZUGqlO8VZK3h3V9FsgDvFSwveC8gewhK1ilaBR6vm+g9FVJZQWhfJDk7lNrEzfTcPydyjEDRXblJELhYQXphPhEAeBoxlHoklR2d3Td032plaA/HXENKv6rHR/KvI6KUReUxin1IZ8QiJLEAxQkyAlm6d2un9LDeSbKE+C0VULwYAm2gejHSY962MF0O9R5HbenjCI8u4hZAmvGJ+JhAaOCDgqxBlnsD4dgd1MdtVH9L6L1AtyBAGboGG+KDEAQJz+eNVH/wij5R3jWkwOE9CXgQriPd4ie5/cPGdilzkG3UIYw3BDriGdBJo0Zup2Ng+UNGQcDQVld3eMQlyxpljAgCtv+0OEO+G8QntHp2Lu/gCtB/0CkvU1u7sChReibRh0DXoi5frXVKXQkvA8gFvDi/JDldoAxBIK4EQ/g/IxmFToelj3aAAGiQhEfLHNLAOpPeATptFfA64L3hXUMX4bwb5qTK4X4QsB+uaN5JyYUkrbgH2naeNHzD/Qxk7rOWsIf5EdLhkD8QHRAmGKHwHr5QPTQdDhJ2tUKA/01k4DGSoQNSw+8mEm+QHoKxi77rdHrOH9E5R2ZZ5PmRQF8MIwXliTUMCZ2GQHcYP69Rnf2TSMvRdJ1ryCgBcVvn8EgvI/Qd2gHqDTwAQbaz6N2DA4BwDYV3jwohgQB9R1rtGvmSIRAoHFgyYjfgrYCAgtmiTA+TslaD8jYpigb6D4bR2fl2Kdhwj0MTQ+DgGdhVJDM8JBBKsG8EgELhHUkdKPYlknJ7oapLfNPmEWcVhmMyUOZFJLyobLgTHyYhAWkJbRfSvYqYAnSmB1DZ0QA+phcLJohStCLGIzF8X6htKGg8L8oKBY996HRwDzTiL3dBSPCiEDsCAobOAY0ddZBi0sog4jWd3r57oa/BPSCMEHogyaiV9xsK4CmCJwak74HtnWIrkSh0OpG1q1He6anUUEAYVescljnch66IcSgo60076SwwHDCbrAYot4eojmFhbHF4pZCC+cMqXtMRPh+DZBi3x9AS3PxoyDnS26AhJRc/RoLhEihSPAPqDBYiXO5nUWc/jco4g96DnWQsk8paQ53pnZs65DHRgHcFJAFejAS8SKpjnLOrwDj8rg7DbKP31Uh1gyEYKCUQf7SNx0jBXkIkfl6iRthIFuQwGMkRhuQwBv2BHKYJicV03k/gZaPfIHMwEOC9RMcIbyKsNVzPqhCSogTIS5h8z000ietnG6RCx9uA/CFYPHrYKhrwgr5R101lS5U6AO5wdCbA63Wufm58yApc
9GhzKAGKoXp8dgWNJqxz0BZXkr55hto8ZDVWMCvqE2TNTHWVqQwrYJjBEPEyQCRL6T1XKu8y2gLF/Q5MDw9pLG/3iueJbIOUP07EDUGjBym/qYCrHJZiO9UH2jqC1bOG+Gx7CigtdBY6+VXU0TxFnQ88aZAv6MQDld/CQ9xaaUjBiIweysWFQAjOLzRK3Yy/tRHeu50BXi5493BJ6AKQUnjWQPwgryB2L1E/s39aeEgZ8gijEveDnniqIizLIIcY0sBwCYg72g7aYJcvIP5KZLWV9D3aQ+Szw5sLooxnxlAnhqxg9WuU54k1nBkJ6NMtXT7pEYQxjRAA6HIMldT2BuR10PRhwKPMhcpwIu6ZMMR4HvQR6OjhPUIQ/nrSp0l6zQAdDo2hDoGqBk14eEcnZ0GqQFDzznQ4+mIYOoFg2CAGAcGQ1HmFiVK/w3BergSao5tGGAa8Q9gQhpBD56M9xyL00RgVQgLVYVDcOCrDws17lO+Y2aJaCShTa4woYQTMPntAtlSgKveI9Xo0g+xPpQ4BhAYvaR114BBIsEIMJ6DCnyHLFhWL++PFo0OXFqxGro3SR3jA8MDMzcpYesRwfN9ij9HlwfMB6GAjx6txHZRpKJHYEAiUGdbrWdRpIcAVZX2LWK46zgw2f++SdNlgg5EpdYfRV0Pm0cFjGyw+GZ3dM/Qu0NhVPRNuK8MnBXifGAzDdVR5gNfHG0AcgYg5YwoYStzQngJeL/SQX3EhA/IZaLNrws+gOCWkYo5lZdwyP60vjilI9agbRng+OknVqkGH90a9i6y6ZEn0AZBlWGYgO7gsSDYcuerwIbwFuCfkSfVioTPBddFuUXT/IPWNNqB6euB2DikyEFKup4/iwYM9Fazbi6clyvYB9/MsIh6wqjDkgyuiDuF5hJcC191ZHe2s5tRnVodfYwEdG7yLYXd8SAwcfR86jMqpIBtqHYIEQcNBh+hjPAMOG8/xsEblfUPGVd2DuCHUFWJkjIpMAO3Sc9T/fHgUH90nS3qd1N9UN/9QgIBL6Ge/7ENCcsgPsgfPF4CZffCqY9qtXhPubBHEDV2D+7mUykX7RHvE08BYRBsNFxu6L3ZpbMoUFXgZYWSr8o5n12jELmfiwBMCnQ1CAm9EhzI8BUIC/Qcdfv/SDMXIGlyWd3YblEHqJDG4Dl+QZBJPkw6HLojU4ZGG/VDfh9p3R+o/GC7Q4TpjeIZrLIxEh48KIYHggBkiNgSMDB4KMCfEOgAgCIgRgfWImAYExoC9wnLKJeF10ItSLZv1tP+CrxvlkMgzB2YPuBeOmU+W2ofKMIYKkAK8IAgglNmTZPHBQwKmDbcWWF2ywdJ3DShaWLlQJGCcx9KxCERD2ZKI3CBuAW72zV1h6whekAOVmJRI4PWsIWsMz44x5RvWhd3yaLQIfoNFZI3Q2gj6scaICsdfcCPCElDdywh4fLchPKaNhoQ6giB/RPfBfHSw/weWZg6oIzBvuB5hJUYDZLCJCBqYK8ZT74qRA2BvehY0TARznfFFg0gzacVzJNyqGx9CqXpJ4MXAs+KZYg3ZwE0KD1SJ3SADvh6rcMjxT3he8P7LuiOs1/HDQfqhicqJoSt4Q06nZ4DViLrH2DbqEoFdBmXKKawpDElBacKChnVQT5YRFBQU2t/JokGuDuSLuag4ccC9cD7aRKd3h8UCRWtWY5Mw/EPXe4GU9E9nJ9NxAfHQdodUFmqwGpQ5FCi8URhKRKzC05V6Wb3o8HGlDdQmO8hKKqey411gyvCKGPKC4TM8Y7LRKH6zvpXadrckXBgeQhvC7/CSALDyZpI1uyrGdTDDC+5eeGfOKLDJsmAISJ1SC5mFJQvcTG0IbuEXSeYOy7IOuBYCHuHV6/D1N2xC9MyqpQfLF+8BY+aR0CntCNWJ4YCfrm6VrvhfzE3pOwbDM6hPdErwQMFrB89gdEBmSKljAMNRsN7XdLrFafkJYQ9DuzdmmxjPwHvBM+ETXsEDyVD8pq1XDplA
rtYos5m2dHnoGJusSwzJQL9i9lci6Q3E6EEv4MnP+6pBDre9QO9yYcrAvJuQF9UjocJM14HKhH5Hp4bOHP0F7gXvCNoWOkiv0qODwECfwpOVaSbikmsTy9rD+TvmKCkIMDEA+gzvEG0MMVh/294/FADPjOfDZeH5/SXJIYbL0awQV4F+ql8qCTJA4fGJDNrEr9DZ180OyGFVeGyg5zB0r1FkD2X/jAyIyxASQG3z0X2zBtQLQhDgefg2xnAuSGAD6YB5STpxSbFd/GFjfx2O9r042Sj7PngHTyMdju8w+K2K8QxdocomPLw70+EY8mlU+gzo8L9TP4XUFzlWvdQ/pbuR8yQawyYkqMB1xxf2/Q2mhARKfyvtlJY1mOyXRxdI6wuChml+CO6EIKDDXUjMDXEBIABgqXiBP1vVItaSQIPYQID+d2S+ZICROQ0wdbWaCA1cyFBqiHCHy1kFxpy1ircD5BeNALM+Cuh4jJVDjiAYqG5YLjZZNo9U7AjMunfvDHGrL016BTzBoHweuLUQD3AqkRQ81z/2z5Idcaqxv1n4tMLgj6Bn+vDwPDk9CyWXbr9PaqXXBQQHncFJuZgrnirH2aIB19uyNk/YShRyMbg+dzMaCRolEjyB6KGOo6fsoeOHNwiu9N8tSI05OwENDcFTCHbEeCy8SKp7XMUmJSYCrr53D8uVFjQsdHVaJTokxBwsIQsISeIuKEqUJOX73zQNuF9Nr0+6FRHUdz0p/StKkoSNyg3SCOIKwqa6E+O9ihJIxZ2L08RvF+zonDBj4vIVzbIRIjbg5vmp4v+oA0MHCmXzPD3DdmqQIAAgvpiJcseidBkjANSSnJ5PBBvvDgQVAZHokBOjYnGqpLIMyLpAfMnlRDgrlKECWOGJigsGhBvtBEq52KYXW6n+linuUkwXRxWCQEL5PUNyCS8KLNUPSC4BtBN0OM9Xh4PjIOez7Rni6KwE8dmRZnk+3rVbYfuwPJ+udMoYmt/R8yO2AG0JhiSGKG7f3C7lDsQCcUKIUanrDQ+7RQKXe430wA+orUE5wkD5dwRZgKJHuTLNFhl7hsDAlKh2VkqkBnKGmRuP75cpE+9FA96ij5t6ZXwNPC5ICpgcUdcB2Y68cjjlFFKuh2ZaZYcWCdQ76hUkA20eyvtrkvfrYwSo/4c6LHh/EGPwFB2LegNZAol7iOp2PANDsW8fmtvPlf4WvZNbNrTLWXAwLlHP0AWYOQFSft+2DvkuEYyOIQkQ7VcOzpF1BP37BrUBDP+h819IJBaBl1Abke8S90O7x+feJJv30Xu6ZNkO3WHXh70wIPjoI+DxQNwFOsV36L6Y4YRhfHziqqnKrA/EjeB+5xTZxNGkQ0EK0cnCC4cwAuhltBt4yH5O8oU4xWgPLfoE1MGJpKcf3idTzh5E7YB83rSuTV4Lw7HQgXimR+iYy6itRuZJQT+Da1yCGJuQhu7pkG0EWOvwShk8gOrti6MKBgwB4VzofAyTIiYE8VTRHAGyhWFB6Bkk6cP0eX2E9wblhYMA5AJDwnjHqg5XPRzdig5HvwWdjNhK9HsXx9Dh9XS/+0mH37tXhjSC0IbDw0uY6OCUwzfRbXWkGDIhgTCsIqUa3WeobjEw0CtXNMmsiIuSzVKYviZm/Q8STjWvBoIfL/ymUZxXaJMWPDqAFo9ftJNgQWFfu7JFKmu1g4KlhQ3KC9f7FQkEMjOCtFREsTK8SPV43M1DFX531OycbionYlm6lLwcUOyI3AZZOEPJ97Gs3UckJRwMhIYHRXsFMVmQlv1Sw9P6yruDssGp3gt4dcB24TLH9D+MS6OB4JlgaeE6yCKJKOkDyOIYLOIYQvIYCRoUBQTntVpXP6cBginRQGGlo6wynoXqD9NK0UDwLq6mzhM5FGQuCiojBGVtR5jsoSFgw/AVhB5DCBnm8KyPb9r8YjlZFWDepSTMP17ZLKeTZivWLywgTP1FfeAaiAfpVWYKYAgJgYlw64K0IEgPZYHo49n/
RUqqkzrbi4i44NnwnCCCiCPyKG5hTI9F23QqnhfUL94lGuRYh4/g+ugMo6fDAVAkcMWj822hZ4cnQZIzeh68DwRQq67J3xDJxLgpvGxwW0PhorHikX67vk1acOgIoWxhVWF8+T/K1EDcHwmgECiIOo60xlEseBggexV9AaA+6SWLBMgP6kxOxw2F43GQ0fSy6XaZ0AulXNvZLeOlUL/4+0Nqk5Bd5HUoTjDKd4D3B4UGaxSPhsh7WMQXE5nKl+PpGil3mNqO+3joP8wowRTZsPs8dk4RuNmhDxBkjraxJsKTgvbys9Ut4kczk6RhA5Jc5QrJciKwEECngqh+xH6gDaHzB+HFM1crz4N3+CN65gvpPckEb9QW4cFCW/h3XdiAQaeK8oNEmHRhYon3jNk+XlLKLVQszJ768awkaQWjM9oo32OoTy5rIu6HmWcXUf3BIEEHA6v6aSL96jTiVkU3lXWH69OreFZ66V5DnQY7moC+xjuOTmIHoJOHjsHsn9X0flFH0GeftnhlNtxvFfKL+kY9I44KcRY2fdiQg7VcQ3J9zapmOfMIHSvuUuYKks72iw3SsxLWCbDWEaexPiqoFzoB8oFyqukX8G7eiZjRhTYFvQ2vjYz9E+Hp4yCpaKMwyjCkAx3zSk13n2cDs20gO4hthCdcvntqb5j5hWdHW0a26k9aEuSEgQxTONVDJT0XDGIA7/LnJKsgA9BP0V4F6DPoNwTYepTZiSpeoQ4c90R/k6B4zlvaScZb3FJ34v4/JtnDzLlci05623Ec6gN3UXU4dA/6vPNl3GFYh8ObgrLBI7tN6vAWqeNzFB2O5G+YeAGjFdd4pKxLkjQMLcHzjnuhHcLQhXwERJg8QmZfp76om+oH7Qp1guNA7nbo8PD1If+q4QpjRtVHQ5XyIRESvEoo5OcHmVqqAo32hrWDp2bGdRD8ogbARANC+q/awSONMZ3s54NMo32JXjS2nQEdwnc/r++3Dx05xigHmzYLoIO4fRPIzcDhDQDPhReNGSeDAcJzx06uoSIcDFQX8zcoP5AJbIMBDH9lZCQ4teHTv+yfBQ4NDFNV1emqsbCr36EEQOb+FEX6oKhP/6L//dBA0XG/1xD7ehhGOuer/ufAI4FtTwD1cdUuoueBaKUYDRBdBD6/GkOGMTIVHh6L/f6hiEDY/xOjztHAY1nn0YBH5JkoOYbM/2b94HIJ2cWsl692EniNssHjEe31iARyKawYpF1HXgczpu7YHPt3ePR2ltodnQq8ai9FZCz+G5Grv0XNwoNsYtbc/YMs64COB4T6oZ3M3gMBjyZ8ACxFbJFAp4klJAZbRiJabnA8iGK8gI41Wg9GA0ZgrLqNBDocdUptNDYpBHswoK1gJtqD2wdevyqGDomGS8kyGwnIMmZVfdE6eKblXpmOwSW3wQD5eI7a0XOD9AkgSkgAiW0wwON/cow6xrk7uzYAo/lHUfJxRpQOB6GQweo70dEIa/iwafDf8Y7v2doht0ig/qPvt6s+A20OnuBIYGkFbMPB+ExLyWAwGAwGY0qBCQmDwWAwGIy4gwkJg8FgMBiMuGPKE5LINLeDAYcgfwKCczHbAUGOsZZkxnEFynGjuSQ1gzFShPMH7FwWETQHuUaQLeQXGUxjrbFi1YXzoSAojsWbMZ6gJtHbWTZQHJOlLCGiU2ZxDraoI4LSMbGgKw5Bx1MZU5aQYLrXaXkJYl6SSQoqFtDCFM76GFNlMRvoqf2yZaDRxcV2UerE1NmBwViYBfHkflnip6tb5OyK0QDujZlLWLdEzdnAYOwKyPeDnAGYIiunjHd4ZER8rHVTkJkYKd1/uKJJ5pv53rdNfdlJI4FMlVh2AOuRjNb6K8hUiRw/L9d0j6tkeIyJAehHLO9xUIZZpprANF1MDUZQcjQwk+SORWni5epumSsFMz2RayQWfr8gVeZ7emmIS3IMBUgfgdlJq2Pk6WGEMSUJCabE/nlxupzChrUa4M1AMqrBsvcBam6z
WJkXI6EZ5WQaYPJYqwbp55mQMIYCpL9GPpDbN7VLuYHliORHg0m3VrPDU7gz+R5GctkhA20RayQhp1D8l1BkTCRAN969KF3mefrz1g6Z0A45ohKi0wdHQJVvnWbnmnq09TiAxJoGjYcJyU4wJQkJ0vdCLh9BbhBFSyP5C4D561iQCcmAkP0VScQ2xvB2wMOC3ANYdAxZ/B5Q1hTAdS8rTupbefVWIjzINXBijlWmPgaDhxcG0+mgjH8zL1U0evyiyxdeyfHE3ARpNeKamE6HBG/oLJDMB/PEkRMC7B6rkOK6SFB1z9ZO6VrEgn6Y1gZXIxbbG+qCZIzJAyQdu3Fuini0rKtvKm9Q5jHxSQWOvA/ITQCPIBKx/YVkJxaQbwTrbeB67mBQ3Klk9M1VFs2E7H5N10c+BFwXi29Z9OF1Rd6s65YJw07LtcmcLJjOi8yw37b3ymRUWHsJabTvVq6JNvfb+WlykUIkWoK3BtmKfUo+CkyRBKG6aV6KXLQLHQ+m3rJHZWoDy40g18dF3zZKmQCQxRQb8qL8iuQF8og8KkjOieUVogHZ/W6eTWZ1RXZxpH3/d204qyp0MTL+ymSFJG+YyoxMq0goqeaPQdoDGLJImogcSpBrTPtHpnGs/YN2hHsjYzBIDvK2LKb9SKGBKcDXRfU1ICtYJBDtDM+ELLXIvzNVJH1KEpJim0EmAYuxHpRMgIaU8hBAZCK8ekay+MnqgfPpIUwQ8FSTVxIcpCte72iXk+GRgAlJapCSGsnF3m90iatKksVVK5pEmycos0fCKkTiKpB5WLIgEnDQfKKsmonkT1hRFqQCehfZTpFMCQ0FmQ2Rqe+/Tb3iyhmJ4ntFieLxCodctOwHy5r6kgkxph6Q1Ajj38jeGq3EkNDoxjkpMgcGckiARCAbZ6y1d5AoDHkWkDUTK5IuSgqn/W5XcnwgMdXDSzNl8i8oX2SevXFdq1ws7r69M4h8eKSCR/Kz+0h25eKZJLzha2rFT+maWG8D1i2Gh7AaKIaBsLLviTkJMrEW/r6fZB3LN6DjcPpCkuD7lGRNjKkN6HEsr9HpHSi/GI5H+vWbSCYR/3fP4gyxsn2gZwJ9wDLS1TASQUqgS0FIgG+IcD9CevZcIsiXkQF48/pWIgup0huDFbqvJSMRi4S+QgQEpPqibxr7iAoMRBBxDHMiISQISUiEk6Qh1xb6GvQD0X3N1SubpB5HduRPiYgMZw2gyYApSUhgeUWnfwewVguWvEfmUwgqssEiW16sFSrBzLEkN2I7IILqirgQHihtDAPBY4I1T9Z2hjPpqWsegIjsm2rqW3YdgIIFG8dS7xgDxd9ppoGvB+OkYM/IpOmXGR994tLpFqnkVUwlAWb0RygUzq6YYtCSku3/G7wdWBUamRNl9leSIaSOhuKLBCTpAmWJAqTK1ivLQCAY0K2sgQGyAA8LgmDnESGB9YhkVUg8jTTWxQkDZRceRViYSK4EccU1ewP9O5N8uRprsG94EuQaq3JHevuYjDAA6HHoPchbpC8YnThI8oYur5RTyJKBGO20GDKJFZ4xfI82ADlXs4SjHUGPQ9aQkBPxH8lyRW+dbDe49yaHRxqckUnvIJoyJGCvdPER9SMg0n1LY0RA7Ws+GdDX9C/jVJP1KUlI/ksK8ffETrGUcnOENwEpfcFw0xQBwuJaWLumLcbqxEjFDdcgMryeT0LzHWXlVRUgIBDeVk84nbpeWRUWCh3kBWm0o2VtH2LoWC8C17xICJmqPhoBZRE1dW2TdFN4ASVXDCuXMfWA2V9fknJFSmnER7kjNJq6RDzSqYNUIOAVFlr0Sr9YLh5p15FyG5kgT8pJGHAfrVxfRyfdypipACUcnrkQ9oREB4fDWsX6Mkg3/WFjr/hu7sBrAmh/VmWlbSh9pPeucA2eeZMxdQHDEUMe85NM/bJ/h5R1y2BIapXZNGGZHKjHi4kUYC0cLJg5226Uy3JEI5l0LYgNlsbA2m12g0Y00O2w7AaM
zOglJ7CwIMjLUxVd0gt4coz2M5y+ZiphShKSz1p75UJMdy1OkysRQ9hgmUFZYgFAeCnSzVqxmIQJs2kQk4E4kkMyzHKMD8oa49qn5ieI6+ekyJgPWHoQS3gtMNRyZKZVLmyGdRHA0OGyu2evDOkqB5nA4ktwJUYCad9PonOx8BPYPMYjoZQ/p/JeMT1R/o7U4n/Z1iHjXOCCxMJpd23qEP4pxqQZsYG4it8SEbl1Qaq4g+R7W5dPKmWQYbiBnyB5voesNxBiuJlv2dAmiQkIc9jT4ZHeO6xqjTVBCkkOQaBxXXBekGyQHWmBkoWINS9wzv1LMsUN1BZg4WHGTIunP0GGMkdMy8XTEuV9MX0Y18Q6M2gziJHCWhgIwt3iNIvbF6XJsfU6t1981dYr9koeuFIsY2oDcX9y3ZcZieLoLIuUOazcvpxkF0YdFlf8BclkDhGHF6q6JEnB+jxHZJnFu/U94tCM8ArcTn9AHgcVqi4+h2FBrLmENcOwajRWBsZaLohdwrAN1t5CLMitG9r7LU4IoG25AgFxA/UjGiH6ZqRhmAerJicSa8cCkI/E6GumelxUX4+o1WqD1FE6unzBpHgWaE8A7xxDLVB+GCOH1GDoD+5hEID/W9kiVwP+l1SsYbcdxsexBDwsUKw3g/1Y60IuckcCKRe3I8H7/reNIpmULWQUjaJdYbwYi0RjgV8DViWOV9co6VHYBKaqYZGyyGuGF0EKB8ACcGdj+iYsAgRr4R4ddA8ZTEvlaZ9CDJusnoFztHeCZLO+vZoMmLEqz3gBZPi3pCjTSfEZdWFZgvXV4QvIoGgsqAdvBlbQhjy1kMwgTsotF4prlRYlrDe4orFwIlZ+lQuYkSxiVVZYdbgGvIs4Bw7r/1vVLC1JyGuTXIAxJJ6u2rGOBdoQVt1+N+qabmW18ES9VnpznL7wopiQdyhztDNYoIiJwbDQVFLYeq12WPKtwmIy9Zp0GjdZ8+bRLtN4A+I/sFgeZA+LPsLbB/0KYvF/K8My6VdkEsQXi5yCDGNBPcSfQMdiZW0MZwpF1wJYDBPB11jVFkPorcoCfghQxSq5Rtr/z8qw7kVbwlR4dVE59CO4JjyIaHsPK+sBYTFWxC7iVlios9I1sK8BsKpwrLiuyQpSN0HwD3zvIyRGo9FbYNVXbnR4F8evaHsOaqxHe4yJKFCU0a4zKG5VvdYp7mgpQFEBpPCGxJqei06itqe/foFy7T9kFPuaUNo1UeeGV1DecVwoFF6ReSoh36qr0uv0Q37oGUmW7eucYt+xLNN4AeStIUpGVNRGDaf0yV0UomVO3Rdrf5dvYBKp6JVsIe+xzkUH0hERmBhL3rHPHZg6ZBsoTkssG8l5aclJrSkd2vbGQCB3tMs0HhFL9gDIX7QMgtz2KvK/Q0+HBgTGxuoDABDuhij9HgiJfjoffUu0TANok9FDmbHuM1iytsmKVKOuNcmWIFlbHyGxWiw9h6f1fLC5y7uQ6m3widwMxjgAgs8PSzV8aDIZhzyp/9iClLff2eI+kzpG9v8zxjWMWo336Bm574/k3JKC3O1zWzrWUSc5JQgJY2JjgU27piAnqxrf+wiJTqcLXJ1vuOe/9cGTtrq18+NXPAZj1yiyGiquKTDerdcP3UNyXF7iWydVtb36b5f1grEsG4OxOwDZPi/f/tT+hfYvR3K+3Wp1/iy9/tY1XYZ9O7yBtNEuH4MxWkjQa7t/nq+/NSUxUSYl6hdVmZma0nzHLPc1v6zwP7TN6ZsbnyIyGDtHvkVf/fuCwE8LM9Mqh3OeLSGh+1d75d7cuMWd/02b+5CQGIN0jAzGbkCnEYH9kg1f3DTd8CuzyeTe9Rmxsc/M4mW/Czb94veVoT93eIOpo1lGBmM0kGTQdt6YJ369tCDjW3XfgFk23ynO/uQfutrTHqkV133o1J/S5PHn8BDO7sEogiIz5BFuoRMejVZ4qTo9XKXDAgKf0k265oNMno9/MtN215yctI0juU5Bekr1
Ews6z3xii/PHb7is51X3+Iu9wZBxtMvLYAwHRq3GU5Sgrzg7wf30hXPSnkhPThqYjXEYgMf7vDnZTxWHSrf9rc104wqXOKDNE8gYrfJOBcBaMZHuNoqAMIWCwqQJijph4TxPuwnEjCwy+1f9uND4xwOLMj6L9HIPICQajSY0s6hg2115gWt+UFn9t5qmlsJAIDAwswtjyOh0dhcZNeIX3f6QtSekMdJm6A4Ic2qCpSzBbGxOMBmbkyyWWuFzV7c0NfUY9DqfyWB0Gww6r0Gn9wYD/oErnU0xQMHmpKfVzy2etslkHHrcSDQg3+kpKa3X75d42zmNzU+X1jTN9Hi9HFMyQrR2dReUezQP7pdpO1XsYlVhxuAwm4zuWYUFW/Oz8mrUGQe7C1znwPmzvlzY03vB1qqaOQ0trRxTgqk4Go3J5w8YfH6/EZvX5zflTytOcfsDuY4ed77T7c3tdnsyXW5Pjk0n3FZtyGvRhnwJWk1vVl7+DU5HZ0+8H2MiIysttXF2UcGWRFtCV/Rvg+Yh0et1/gUlxeuxjW3xJj+8Xq+xq6vr3w6HI8PpdKZi6+zszCrMycyn3zJ7e7tyehyNR7rd7uxk0iNGrbHTrNE0ktXUptcF2yuqq9rS0tKqkpKSGu12e3NqamptcnJytcFg8KKDxYb7qJ+MXQMEpzgvpxxbvMsykbF9+/Z5trVrxcmHHPNmvMvCiA2b1dK9dO6sFWLurHgXZcwQCoU00Z/d3d0ZpHezOjo68knf5pL+zSYdm1NQUJBOVl6qV/hSvEFvhtvnTndXbu0wmUzNRRZLnTXZ+rXPF2pqbe1qSUlJaSS922qz2TpI97bT3w2jRRgZAzElE6PtaWBKdXp6ei22XR1LBMVMjSevra2tkBpQFpGXTCIahdQglvj9/tSmpqbUyspKEJkU2h8wm82NFoullrZ6+t5QUVHRQA2ombYWOqediEtzYmJis3E3vAoMBoMRb4BkkD5Mw0Y6MpPIBghHeiAQyMjJycnr6enJI72Y3tvbmwfiQTqvg7Z22tqgJxMSElppfwcdt56IRR30Iow8MvCqmWSMDzAhGWegxuPOzMwswxa53+fzGajh6fEJjwuRE/xtp8aX097eXgxroLGxMYtIzIHU8BKooVpof0J5ebmN9lnpEj20ux7EhYhKHT7p90ar1dpJf3fiFLIA8NlpMBim/BARg8EYewSDQa3L5Uoi/ZVEn4n0mUyEI5n2p5E+yqN9eaTjsohE5MCDTOShl8hDr06nc+v1+m7SgS46vpOMs3oyvtZmZGTAQGsmvQYPso+O8+MT3mR8MvEY32BCMkGgNCofMf3IhT0aaNs2Y8aMTyOPpcab4PF4LNSQE+i7DX9Tw0yiRp5HRKWgo6Mju7a2dg415lxq4LA60Egx3BNqa2sL0f4uIikgLvVkRdQRSWkkAtRAZMlF9+9BGajR47MHQx97sh4YDMb4BkgGkQcr6R8rfVrwHZ+kLzKIYOSQDgLRyMZGeiqboA6zyFhF+l1D+zuJnGCIupZIxjoiHM30ewMRjW7oH+gefCc95BrO1H/G+AYTkkkIaqgubCkpKa07O05xgaYQYUnEJywTsiBSiPikUYOXLtDm5ub9SLFk+3y+DFIOPbBKlM3Z2traTfvbyDKB+7M+KSmpno5pxNARERUfFAUIC33K7xzjwmBMTEBXkFGiVzyzOvU7tf9M0hvZGGYmIpGD77Q/Kzc3106fNmWz034b6ZY2eC/IwKlNS0trIJ3xBekYDKUgPqMDXlra1454DSYZUxNMSKYwQBCgALCRlVK5s2Nh9ShjthkOhyMTGxGXTCI+mUR8ZhBp+U5tbS2CxrIwjmsymVoQJKaO39LxTXRMC5GXRtrgXoUiaoFi2kOPy2AwdgGlnWfRlkkkIxuBoPiknzKzCB6PJ5O2DPVTidFooTYOgwSGSCORjxVEWhpJLzQR0WhTYtoa2ZvK2BWYkDCGBIy9Eolowha5H5YTlJj6iQ2xLmT5
QJHlkELLhfVEWz5ZPXNIKR3U1taW2tjYmEwKLYkITJISlNsAEkMkpZr21dL57YhqR3AuPhHbQt/b4vX8DMZEBTwa3d3dqUQy0hCjgU9s1KYzqD0jRiMXQaCIR6M2mUVtsYuMjU4iGtjaqc12uFyuNjpnJYwJMl7g3WjCkC48odANMG7wqX6P9zMzJiaYkDB2C1A+sSwfIhClGRkZpZH74Ob1er0mUnpm5dNEnxa6Ri4pu1xSltl1dXUF9P1wMsZspCCNpAiNRHJkIG9ra6uBFGGF1WptguuXiApyNtTQdbowlky/dShjzC66v3PP1QKDsWcBkoEgUCLudiVODJud2gq8Frn0G9pTLoJBqd1lpKWlgTggTYD8pN+9REK6qO3UUTtaSW0VMWON9BtIhpuMAw9m5tEnvrvZu8HYE2BCwthjwLgwNsS3RP20KfpYn89nhIJ1Op1JRExspEDtiWEU0v4s2pdZVVV1PBQv7TOS0vQ3NTW5SaH6SFn7amtrvampqeVETDBm3UyWXQ2RlhpS2C4oWgTGIfJeVbx7qAoYjEEBkhFJ2EHCqR2Y6acskHUEg1J7yKUth/alZ2ZmgqzLLRQKGTo7O/GJWIwG2uro9zVERt6n3xuJbHRRu3OCsCNWA985ToMx3sCEhDEuAbKAIZqhDNNgRhGSzbW3t+fAJU0kJTk9PT0tOzt7GinkQrIE9ykrK5tGRAbDRkEiIZ0YHoJbmv52lJeXd5AFWUnEphGu6JSUlFp4XzAFO9INrbql98TzMyYHMJQZY1hTpxCMHCTsUpJ25ep0ugxCKhGRVCIiydhIZpNJThEM2oCp+iTXyDe0gWS+CfFZGM5EQCjJbRuRjjaess+YyGBCwpjwIHLRS1sdKeu6XR2r5DrIbG1tLUTgHoJ0iZTkkFKfQx3Gwc3NzRnV1dUyoyORD4+SeE4mnQOJURLPITAXAXut+ASJMe3GQmiMiQ94NyBPCPZWA7+RjZnkIjM1NTUb8RnIo0FkIyMyGBQyRUSimfY102/ltH2pyJfcSMaa2JPBmCpgQsKYUoC7GltWVla/+BYl8ZwOAbmIdVES0GG4CBZsPixYJJ7TaDQLqRM5GNMYOzo6kHjOTh2QnS6BvAjI2VKPTyIxTUR68L1dmdLowCc2eH/i9PijAnrurI8//vi/1MEii7Cenkc8//zzMtiZnm/Dcccdd+JkGAaDHBB5xXT4FCTuUpJ2YVo8Aj9zkLSrp6cnF4m7SG7S6dldkdPi6fxuqiNMvS8j0vo5vBvwwpH89E2LV6bGy0/2vjGmOpiQMBhiR+K5qN1YcbUfcYHbPTLpE4aLkHguGJRLvCM4N6+9vT2fOqsDaV8WdULIRInETzL5E/2GfA59ieeoE6vHmD+RHySe60Z8DZI+IUh3vCZ9ok61NT8//62ampob1GRWVPZMBEMWFha+MJ7JCEgGEgZi+//27gOwyuruH/j3ruTe7L0TMggkTCmgOBAnDhQt7trSWn21tVv71tbxqnVW679aa1t3nbU4ilpFULQgshEIECBk79ybcTNucvf9n3NIIgEEblCf3PD90Mckz33GSSjcL+ec53fk76P8vRPhM1K0WRbqknU0BiaEpomAkCDI01TRQDlkZ7PZ5LCdXNukSWwV6enpK8XvmVVcQwYN+Xsne+sGCweOxN8/opGKgYQoCPJNaSAsHO7YgbLYsuDcQFlsEXrixEuyfot845OF5yb3F55LiYuLkyWxHWKTb2SOtra2XhFU2vqLzsnic02y8Jx4w5NPQrhlwTkZogY+fp3/wpY9RjKMyXWZ1MKEeXl/F23/vnhTH1xBVj6tMWbMmMUDX/f3OBnkOV9Xuwbu01+ky7TPEgtmscmqoAObDItyfZNEIUIcHymOi5Cb3W6PED+/VvF7KpdSaExJSanNzc1d19ra2iIfN+9fUkH2rHXKZRYYMoi+HgwkRF8TGRAGhmkOd2z/HISBwnNqDoJ440vOz8+XhebkYmEzGxsbM/oL
UqXsU3iuXXxsEedZxb/2ZeG5RvGv+gbxsam/8Nxh59UciYqKitn19fULpkyZ8pB8w05OTq7NyMh4prKy8g6onKbzp6enPy7uq6oDizf5hPLy8p+Jtm47+eST3/oq2iBDh/i5pHd0dMhwkSrr3MjPxfeZLAJb8kDBLvHzSpQTQ+XPR84BkvN/RHubRBsbxDW2yDLkSUlJNtnTIzb50cbHWom0x0BCNALIN0S51Lnc9n9t4EmNgaXVZS+AeENWS6rLiZTiTTlbFp/LysrKEy9Pb2trS25qakoUb84JYosR//JvEG/KsnaLXAukVuyrF+fJuS1tA9U0ZU+ACBO2L2ufzWbLF/e4cd26dZdERUU9cMIJJzwnAsjzra2tl4prTRAhZXl2dvZSOSSyY8eOyxoaGu4WAWlsUVHRfeL0Lw0k8vFuuWKr3OSTUvKjCB1J4h6pok2yWFdG/+qtckKoXNJArd5qMpk6xPfTKo5tlRWAxdelcgVXWS1UbuJ7ahroyRh4QooFu4hGNgYSohFu/zdTGV7EG3O5CAEHFJ6Tb/ADRecGPhf7c+UaIw6HI6WxsTFTBJgTRZiIlK+Jry0+n08Ob4SLIGMSb+Q1/aulykega8Vl60UYsIvXZsimyMXQxLEPrVy58mIRNu5MSkp6UVz7PnG9R61W69jKyspfi/tcKIdCZJtEYJiyefPm2XLISoSGDL1enyrnaMjhKvlRfB0n7iMLbzll0S75saenp09WBpWhQrxWmpGR0SACSot4Xc6zcfWv3DpYQ4aTQYlGBwYSolFi4KkNOaFyv5f27H+sLL4lJ3bKoCCLzskJnomJiZHi3ILe3t4UudXU1MwWgSIvPj5e5ASDrPipzvX7/eEiNJy+bdu2iampqR+lpaW9J8LNzOrq6mvEeWP2vY/NZjtdhImclpYWgxxCkVV2xdcNImQsl6FHDjXJqrr9FXZ7Bwp3cQiF6NjDQEI0TLIXoqS6ccqq0ppT6lo7su09fXFat+krZgcMm5MNEbG5pr6kZN0XL/gCQFO3O2VJfcOVdkNEvcXdMn9GfADxYeIvlX2O6/Hq/es7oxpsHR1tPo9bhAw5v9WeJP6T9A1/L1+LlLhoa0ZCTOOpxTkrJ+Rmln7dE3iJRjMGEqJh2FlRXXzzyuZHlpXZ5vr88tFX+fRrlNbN+lpEtfbglrEeJEcD3V5gW5cea9r12NqpR58fehHNcuRfJa80AJOi/ZiV4Mf0uACijQEEfO7YBz63zgPCsXcbZZrlSFonwtbtcH97fOO/H5hb+Lu89JQqrZtFFIoYSIiC9NG6zWctXNr0YpPDm651W74JssOjpleP5TYdNnfp4PDq4D3I9FCXH9gkQsoWEViiRD6bGuvHzDg/wvUB8ZruwBNGEbfPH7ZoZ9vlK+p75rx/xYTzp43N3qx1m4hCDQMJURDqrW1Zd6zruOdYCSNSjxd4oc4AzxE+oyKHczrFOSvb9FjboYfnGJlyGghA19zlSrtl2Z4/vBwf9d2UxHir1m0iCiUMJERBeHFb88KNTY4ZWrfjmyRzyJGGkf25j5Ewsq+P6xxnvLezft41p8Q/r3VbiEIJAwlREJaUtpzn9Qf454a+lJxT9P7mqvOvOWUyAwlREPgXK1EQdtu6x/cvS0P0pWrbunO0bgNRqGEgIQpCr9sfsfeJGiIi+ioxkBDRV6IwMRL3zC2CNxDAr97dDpuDJTmI6MgxkBDRAS4sTsUVUzNRZuvBo6sq0eU6/AK3MzLjcPGkdOh0wDPravDfqrZvoKVENFowkBDRASamRuPqaVn4rLodf1tbDbgOf86SMit+urgEXn8Aq2sPu8AxEdEQDCREdMQmiaAyPSsOYQY9Gjr7sKKqDQ733mVnwox6uEUYkVN+www6eMTuzBgzZmbHISEiDHqdToWVTfV2bG/pgp9r7xLRPhhIiOiIXDczB7efOQ7ZcRYVLhxuLz6tasd1b2xBQ5cT+fERePTCSSqQfFxuU+Hj9jPGYeGM
bJhNBrXfHwiIIOPEr9/bgUUljVp/S0Q0gjCQENFhTUyJxh8vmAijXo/HV1ViTW2HChtnFybj7rPH47o3tx5wjk6EljizCRYRRl7f2oCXtzTgxlm5mDsuBb+ZMxZLdlnR7T783BQiOjYwkBDRIcmejbnjkhErwsX6ug78fnkZ2vs8MIjA8cxlx2FeUSrCjfpDXqOivRfvlDbD7/djTn4iEiPCkBwZxkBCRIMYSIjokGQgsRj31l7pcnrR59k7Z6RDhBI5LBNtNg6+fjh9Hj+OwWryRHQEGEiISA2rzMyKw+cNdhj0OkxJj1H7W7qd6pHfstYetaZNYVIkpmfGYZM47tzxKYgMM6CksQudLo+23wARhTwGEiLCWQVJ+Mfl01DX5VQ9IuOSI+Hy+vHmtiY4fX4s2W3FJ+WtOK0gEf+8ejpsPW4UpUSpHpPHV1fKlW6JiI4KAwkRobnHheUVNpyQk6B6SHZb9xZEe2N7kwobDo8P17y+GTfNLsAFxalIjQ7H2poOPLGmCot3NKtruEVwaRKBRpwOnzgpIDY516RR7Oty7u1BkSFHHtPqcKvhHiKiAQwkRIQN9XZc/sqmQx5Ta+/DL9/drraD+byxExP/3ydD9t34donaBqyqacfYh5YffYOJaNRhICEiIiLNMZAQERGR5hhIiIiISHMMJERERKQ5BhKiUU5WRF0wKR1vbW+CzeHWujmKrF9yiWjTG9ua0NtfaI2Ijm0MJESjmCzvfv3xY3DjiXnoc/vw4uZ6rZukRIUZ8aMTcvH+LisDCREpDCREo5jsiZAL4P1xZbmqH/L6tib0eX1IiQrHz0/KQ4zZqMLB5gY7Hl9TjcLESPx4Vi4MBp1ab+ZfWxrw36o23H5GIcxGgzjXj1dFqJE1Rn4izpecIlDI9W1kKXm5Ts2VUzPh8vkRG27CwyvKUdfZh5+cmIv0GIu4hl4cuxseXwAmcY97zykSrzvxzPoaZMeacYO4t6xn0tzlwp9WVagKsreJe+vEL1kLRR5X2d6r8U+ViL4ODCREo9hpIiDYHC5VvGyuCCYnZMdhhQgYvzg5T1Vk/dV/duDbE9Jw7rgU9fUD5xXj3dIWvLylHrfMGasqtq6t68CcvERc+MJ6tPW6Rcgx4vWrZ+BfWxvw1o4m3Du3CFeJELKkzIo7zhyHm8U1d7R046UrvoW06HCU2rrxz62NMOp1uPOscep+i0QwkoXRbl+6C63imjHhRjxywUT8/qMyrBP3e/C8Cbh8SgZWVrVjSloMFry0UZWwD4DF1IhGKwYSolFK9kbML07DK5sbUGPvVYXLLpmcjtW1HZgpgslD/62Ab59qqWEGvXrzv+X9Uvi/pIqq3B1pMmBqRgzqu5yYkhGLopRoRIlAIVf0Nen12NLUpcLHgBkZcaonRBZFk70uGbGWA64rVxJOjQrHxgY7etw+rBHHzhfBZUOdfZ97M4wQjWYMJESj1AQRFE7MTUC3x4c5BUniTd+IM8VH2SvhcPkQZxn6x1+We3d6/SpcHIocUmlzuPHAx2UqhAyYlROvhmHCRRDaN+jICbVyMb57xfF3nDFODf0c7JrylAiTUa2PI4eUqjt61dAPER0bGEiIRik5d2R5uQ2/6i/1nmAxYfHC43Hm2GQ8vb4GN5yQC6Nej/PGJ8Mv3vflEMrf11Tj5tkF+KDMqoZ7lpXZDriuXS6o91kV7j57PJbI10WQ+HCPVQ3TrK7uwD1nF2G3rQf5iRHqeNnrcd3MHCyclo2CxEhUtjnQ4/aipKlL7Zc9Nxvr7XhhU50a0pGfnyKC1G0iOPm43g3RMYOBhCgIMWZDl6MPkVq340jIeSPd4o1/gAwS17+1FQ63Dx19bjXZNN4Shs4+r+odkf65tQHr6jvUEEq3OL7T6VHn/XjxNvVRkkMnz4vwIOeWyONkkJFPyvR6/Hh4ZTnyRBBJEteVvTDyXiur21RPipwn8uS6GtR09Kr9t4rAUZwSpRbckz0zf1pViSnpMWoi60fl
paqHxGI04Jf/2aECTCjR63Xs2iEKEgMJURDyE6Mrm+q707Vux5HY3doz5GsZJHbZ9u67eEKa6imR++TKvXd9uFvtl5NTx6dGqTkgMmTISasenx+bGzuHXEsOsWxt6hqyT84PueGEMYgTH5PE9klFKzY12tUTNVv2O1+SE2RXVbcP2Sd7R/Ylnwgq2e8+oSAnMaZW6zYQhRoGEqIgzJuQ9t7axp5ZPn/gwIkQIWRxabPa9vfXddXDvqYMGHcvLzuKVo0Oep3Of87kMUu1bgdRqGEgIQrCD6emPLdkW+15nza5ZmvdFhqZzi1K+uCiydlva90OolDDQEIUhNSkxJb7zi687fol1U/tsvUUad0eGjl0OgQKEiIrHjw167eJ8XFtWreHKNQwkBAF6ZQJeasWRxguvmtF7V0f1TrOau/1JPgDAb3W7SJtGPQ6X7zF1HF+XuT7vztt7APjs9J2a90molDEQEIUJJ1OFxifN2b3i1mZC7fuqZq6taxyqsvtCde6XVpwenxxnfrIq4tTo59tb7U5tW6PFizm8L5p4ws2T8zP3mEymTxat4coVDGQEA2TyWT0zJhQuFFuWrdFK1VVVYUlJSW3nHn8rBeioqI4TEFEw8ZAQkRERJpjICEiIiLNMZAQ0bDp9Xq/TqfzuN1uWSeeQzZENGwMJEQ0bAaDwSc2p8fjOXAJXyKiIDCQEBERkeYYSIiIiEhzDCRENGx6vd4nNrfH4zFr3RYiCm0MJEQ0bHIOiQgkrv5JrUREw8ZAQkRERJpjICEiIiLNMZAQ0bDJOiRi8wjH5Fo+RPTVYSAhoqB4vV5jeXn5XLvdnu1wOLJcLldyVVXVz8S+eQaDwTNp0qTns7KyyrVuJxGFFgYSIgqK7BVpbW0tbmlpeTAQCAz8HXKJ/I/FYilJTEy8X8PmEVGIYiAhoqDIQDJt2rSnP/nkk/9xOp3jB/YbDIa+9PT0B0UocWjZPiIKTQwkRBS0yMjIruzs7PsqKyv/7vP51CO/IoisHjt27BKt20ZEoYmBhIiGJT8//12bzfZ9u91+pk6n86akpDwdExNj17pdRBSaGEiIaFiio6PtmZmZj3d1dZ0qPt+Ym5u7XOs2EVHoYiAhomHLzs7+uL29/T8Wi+WVhISEVq3bQ0Shi4GE6Ch4PB7Trur6orpmW3ZzW0daIBDQad2mb5o+PGK732pN2FzzwbVat+WbFhsV2ZkQG90+MT97R2pSYovW7SEKZQwkRMMgg8eaHeUn3r+i8tZNHZje7fJGO9y+SK3bpQ231g3QTLjR6zIbe5zp5qamH06Of+7aU4qfTYiLbde6XUShiIGEKEiyMNgLH6z6/m2bXfe19LhStW4Pacfl9YfLrdOJ2Fs/s92/zOqb++L8CQvTk+KbtG4bUahhICEK0qY9tdMZRmh/Xn/A+NHutrPuWrL1rkcvm/VLi9ncp3WbiEIJAwlRELodvdG/XdX4IMMIfZnnd/ZcM29X3Xvzjyt8R+u2EIUSBhKiIGwvr55U2uyYoHU7aOTy+Pymf2+o+DYDCVFwGEiIgtBobc3odHpitW4HjWxlzR3jtG4DUahhICEKQndvX7ScxKh1O2hk8/r8/LuVKEj8Q0NERESaYyAhClGyAtup+YkoSIhEuFGv9jm9fjR1OfFxhQ1uX0DtM+h0uGJqBmLNpgOu0eP2YlFJo3x8dch+o16HeUWpyIgxf+n9qzt68eEeG8YnR2FGVhwsJoNqk8cfgK3HhVXV7WjrPbBGiVm0VV47OSoca2rasbWpa/A1vWjr+UUpyIq1YEtjJ9bVdSDSZMRVx2WK78ePd0ub0d7nGTx+RmYcZmbHobLdgY/KW+HzB4L5ERLRCMJAQhSiZGi466zxmJ2XCIN+b4FY+Ybc5/Fhs3gz/837pVhb26Fe+/3cIhQkHli3rd7eh/d2WUUgGRocwgx63DQ7XwSepC+9/zsiHKysbMPCb2XhplPHqvZI/kBABZzK9l48+MkevLy5fsh5s3MT
8fcFU5EUGYbXtjTg2je2oFe0WZKX+PlJeTh7XAr+IM7dUG9HQoQJj86fpNr0qrjWTf/ZMRh0ZHi5W3xvb21rxArRFgYSotDFQEIUsnSqZ0T2Kry0qQ6viDf38UmR+Pkp+Sqk3CPeqBe8tGGw98MtPj68ohwrq9sGr+D0+NHt8h5wZXnOr98rRbwIAzrx6/rjc7BgcgbWiYBzz/IyePx+2HrccPn8KijIMLK2ph33frwHMeFG/EK0QfZcPCaCxIdlNrQ4XHtbLALHj2blqpDhEefOK07FcekxWC2uezjyHrKnRJ7307e3qd4gIho9GEiIQlxA/Cpvc6jhk2VlVvgCATx+0RSMTYxEdqxFvSbJ/XJ4ZJkICIcjj5W9E5IMPOeMS1afy6GYj8R9ZBAZeG2AzeEefK2524U3F84UgSYMU0XgWFa+956TUqJxWkEirCLMrKhoxRUiYHx7UjrW1HUgcASdG0a9HtfMzMFOaw8e+6wymB8TEY1wDCREo0hefATOHJuseiIcbi86nV/MtzCJN/MF4s2/MHnv0I0c3Vi626qGd75K8WaTGkqJMBngFzeR4USSPRzf+1Y24iwm/Ke0Gbcv3YVvT07HxRPT8cz6Wuxu7TnkdXvdPjWH5MKJabjz7PHY0+r4SttNRNpiICEKcbKX4pbTCvHTk/MRZtAhOtyINocbf1ldhcYuJ0yGvRNejeK1K4/LxJXIVF97RViQx7V0O/Ha1TNQlBKt9ru8Pvxs8TYsFm/+wThnfArqb5ur5oHEmPcOyTy9rgbbrXsnraZFh+PyKRlq6OgpEUBq7L0ot/WgIClKhKikwwYS2YHy7IYabGnqxAPnTcDD8yZgR3PXIc8hotDBQEI0CtgcLjUXpFiECjnn464Pd+G5jXXYdxREzgu5c9ku9TSKJId6au196skYOQF1gHya5WDzSg6nR5wjn3Y5LiNWTaR9+fMG3LJkJwbmmZ43LgU58RFqOEiGCTkZNSsuQs2DuXpalggpNYe9h3yCRwatk3ISMG9CKsbEW4JuJxGNTAwkRCFOPtXy3IZaPCu2Fy6fhjMLk/GjWXn4oMw2OH9k4DgZPDY12A+4xg8WbT7qdnxW3Y7vvvY5Hji3GD86MRcXTkhTT9Es3WNTE13lo75yKKmzz4PIMKPY9g4rRYYZMCE1GlPSYlByBD0ePW4ffvHuduQmRmCyOIeIRgcGEqJRoqnbhd99sBPvZ8RiYlo07jxzHK57c+tgL4kculk4PRuzxsQPniMnkj6zvga7bIceLjlSshfm/z7creqjTBJh4bH5kzH9zyswUQSOOQVJKhQteGHD4JM+iRFh+OzGU9QE3LnjkrG9pfuI7lPV0Ys7lu7CC1dMO2h9FSIKPQwkRKOILCb2+KpK3HVOES6dkoGPK1rV48CSnFR6QbFcpPiLhYrlcMpHZdavLJBIskbI7z8swzOXTcX4lCjcI9qiE/eRk1lLGruwuemLSbQdfW7VZllc7ZJJGfj72sMP2wx4d2cLnllXg5vnjP3K2k5E2mEgIQpRXr8fl768Uc3B6OjzqN4HGTD++GkFXtqytxiZHB6Rk0tPe3I1TAbdQa8z8BTMl5HXlfVFHl9dpZ50cfv8Q167R7z25/1ee3tnM9b+qUOFILlf+vOaKlW0TVaH/eJ84CeLt6meHTnJtsvlxcJFm1XVV9l2WehMTsyd+Mgn/W11Drn33cvL8MTaajjEPVw+1iUhCmUMJEQhSg7FyDfr/cmqp1X7TFKV6jv7jupe7b1utR3pazKY1NmP7J6yR2XfEvP7ByQZVGSZ+oORk2+HMwGXiEYeBhIiIiLSHAMJERERaY6BhIiIiDTHQEI0ysk6H5NSY7C9pUtN/hwJ5IJ8k1Kjsa2lW026JSJiICEaxWQhsgUT03H/ucX4nze3qGJpI0G8xYTH50/CRS9uQOuXTJYlomMLAwnRKGYxGvDdaVlqUbpLJ2dgqQgk8ukcs1GP88enYnZ+ouqh+Li8VYQVq1oH
58qpGZiQGqPWtFm0tRGl1m5cMikd0zLj1LGvbK5XT+18f3qOKt2+tbELL26uU0XWMmPMuGZGDhIjw2DtceGlTXXodHpx3fE5qmx8ZZtjsER8mGjDvecUoa7Tqda8kdf+8Um5qliaXAlY1hmJEu25YkqGKonfJ9rz7PpaVRSNiEYfBhKiUUyWY5e9JE+sqcaf50/CtzJisamxE9fOzMGJOfH44RtbML84DZeKwCEDiSz73tLtwm/e24H/nTMWpxckisDQh5+dlIcLX1iP9l6Pqnvy5IKp2GPrwR9XlONRcV1ZE2XZHpva/9c1VfhwTyteumIa1idH4ZPKVrwogkmPx4dnLpmqapHIsCEf55Ur/soeErky8L++Mx2vb2vEU2tr8MTFk1XAkWHoO8dlYsFLG1X9kkDg8N8zEYUmBhKiUcqk1+OKqRl4dUuDWklXlmWXlVpLxMcLRQh5dFUl3L4v3uHDDXqcNTYJ80Xw8PoP/s4vF8aLCjNgdl4C1td14OTcRNj7vJiTnwRrjxuxZqMaFtLvU4MtJ9aiFtKT9VEKEiNR23FgfZIEiwmFSZEqqMgib8vENRZMTkfFii/W4vmyNhHR6MBAQjRK5SdE4LSCJMSYTZiaEYtc8fXZhcn448oKVQHVqB9auVW+3cv3fL3u4BVdB8hjZDho6naq9XNe+LxODc9kRJtVb8x+l8X3pmWj2+nBTe+V4renjYXZaDjwmv33N/TfW7ZNrovDHhGiYwcDCdEoddKYeOy29uDaN7aor9Ojw/HBD2dhmggnS3ZbceXUTJQ0deGEnHgYRACQ1VWX77Hh6uMy8fymOnXcutqOA67b6fLi/Z0t6smdjfV1KkTIku5dTi/aHG4136S81YHMWLM63u7yYFJaNHLiLKokvAxD8l6yuuvU9BhsEW2Q526os+PqaZn4ULThLBGc/ra2ekiZeiIa3RhIiIJgMho9Br3bJ95UD/xn/ghjFeHgoRXlg1/bxNe3L9sFbyCgJpbWdPTiqmlZMIlA4e/vivjtBzuxQASKSyZn9D+OG1CPCv+1f70YSe6/delOXDQxHVeK8OLy+PHqlnq09Lhww1sluEIEndSocHj7h4Oe3VALu9OjFvuTx3xW3a6GZX757g5cLO41NikKb2xrxI2LS3DVcVm4oDhNrZuzsqpNPY3zjDjf6R0ZjysT0deHgYQoCMnxsbbIMJdD/Is+Ruu2HM57u1qGfC2HWeQcDemcwmQUJkepoZQzCxLx++Vlar/s3YgVIUD2YqRFh2NRSZOa+/Hi5/VDrtUjwol82mZfMjxcLoKMUwSWwsRIVLY7sKa2Q53/0n7nS3vaHHh4n8AkPb1+6Gq/Mri82r9acShJjY1sOfxRRLQvBhKiIBTl5ezKjOlu6HJ2j/hAcigb6u2o6uhT8z3eLm0eXNBuWbkNsWaT+lwGDjk35EjJRe7+La4lC7F9Ut6Klh6nCiPHGp0OgTlFWSu0bgdRqGEgIQpCVmpS/TXjLc/f1ua4z+Pzm7Ruz3C193nUtj8ZTPZfbfdIyR6YuqNcVXg0mJIeU3LRxPS3tW4HUahhICEKgl6v9183u/iZVa07T3l3l/XCQACHfiSFjinhRr3r3lMzby/IzqjQui1EoYaBhChI8bGxHa9eMfU797299rYndvb9JBTmk9DXb3pW7Kb7T8u69exJ+R/qdDo+sEwUJAYSomGItFgcv7909v9dWdv82jsbds7fWWct1rpNWnHow8fLj5F+126t26KFKEtYzxlTCj8+c+KY5UkJ8a1at4coVDGQEA2T0Wj0TsnPKpGb1m3R0qJFix6RHy+//OqbtW4LEYUuBhIiOlpmr9fLIQoiOioMJEQ0bCKIGPPy8iy1tbU2rdtCRKGNgYSIjoZ8yshoMBiG96wwEVE/BhIiGrZAIKATm8gjBq/WbSGi0MZAQkTDJodsxBZhNpu7tW4LEYU2BhIiGjbZQwJZLZ11N4joKDGQENGw+f1+vdjChF6t20JEoY2B
hIiGTYQRg8/nCxeBhIvYENFRYSAhomEbGLKRa/xo3RYiCm0MJEQ0bD6fT/aQRISHh3NSKxEdFQYSIhq2/sd+9YJP67YQUWhjICGioMhHfbdt23aGw+GIE5/HuN3uRPH5hJ07d3bIYJKenr47JibGrnU7iSi0MJAQUVDkI759fX2zbTbbzbIomt/vNzmdzrusVqvXbDbvycjIOFvrNhJR6GEgIaKgGAwGnwgdr4kA8iOfz5ck98l5JCKcuFJTU5+Jjo7u0LqNRBR6GEiIKGi5ubk7qqqq/t3W1nYd9q5ng7i4uLVjxox5S+OmEVGIYiAhomFJSUl51uFwzHc6nal6vd6TnZ39UHx8vFXrdhFRaGIgIaJhKSoq2tje3r5YBJIbzGbzG5mZmSu0bhMRhS4GEiIaFjmXJCsr6/nOzs6Li4uLH4yIiHBo3SYiCl0MJERHwdHbG2nv7IpzOl1mj9dj0ro93zSv1+fOLCh60CU+7tpTXqR1e75JsjqtxWzui4qM7ImLjbFzgUGio8NAQjQMshbHR1vKznqypP2GrTbnVFuPO7nH7Y3Sul0a+pPWDfimmQx6T0pUmDU3PqL68qzAoh+cNu0fMdFRXVq3iyhUMZAQBamtvT3x9nc33/vUzt7r/YGAXuv2kDY8Pr+podOZKbfPqnHyP+s+v+rP5xT8fGZB5gat20YUihhIiILg8XhM93yw7Q6GEdrf2hr7rB+8XfaPTxZaTk9JSuDTRkRBYiAhCsKSnXXnvVjWu5BhhA6mtKV7wsOrK//3/vNibjWZjB6t20MUShhIiILwWon1yo4+T7zW7aCR653dXfN/Mq3lidzszGqt20IUShhIiIKwqa5jutZtoJGtqqM3r91uT2AgIQoOAwlREOQERsCgdTNoBJOTXX0+P/9PQhQkBhIiIiLSHAMJEcFs1GNsUiR0e9fJG6Lb5UV1Ry8SI8KQEWNGr8eLirbeIccUinPNRgMauvrQ3utBnNmElKhwhBv3zv2VFcOcHh+au53ocfu+iW+JiEIMAwkRYWxiJDb89FSYTQeONLy/swXz/rEOl03OwF8unqxCxWUvb8Ta2g4MlCb913dmYGpGDG58qwRPrq/BVVMzcd+5xYizmKATGccfCKDN4cbGejueWF2FpXts8PpZ2JSIvsBAQkSDHG4vnttQh46+L55Y3dPaM+SYzFgL/r1wJi54fj02NtgPep0IEWxiLUbYxXUeFwHEYtLj9PwknFeUirMKk/G//9mBx8R+IqIBDCRENKjH5cNjqypQ0d57yONSo8149rLjcNWrm1Bq7f7S49r73Ljzw12qlyQm3IQHzy3G9bPG4FenFuCj8lbsOMS5RHRsYSAhokFhBj2Oz4lHVpxFfe31BVDS3KXmkQyQQy8NXU5MTovG/ecW4ZrXtxz2uoEA0On04Ml11fjOtCwkR4ajKCWKgYSIBjGQENGg+AgTXr3qi1IrMnyc8+wabGroHNwnh3O+99rneGvhTJxflIpbTy+E/sC5sAfVKq7n8vpgsZiQFBH2VTefiEIYAwkRDZI9IY+uqlTBQXKK8FDX6RxyTED8Km9z4Jb3S/HUJVNx06kF6HZ6D3a5A1hMBhgNevj8AXS7j+wcIjo2MJAQ0aBetw8vbKw97BwS6a0dTciKteCPF0xErMWknqQ5FDkc9LOT8xBrNqKxy4mtjV1fVbOJaBRgICGiQbJuyDnjU9Dc7Rrc1+fxYWVV2wHHyvzxnAgvx2fHqXkhBxMZZsSCSemIFh8vmZyOueNS4PEF8My6Guy0cf4IEX2BgYSIBsm6IU9cPGXIvlp7H8548rODHi+HeH67ZCfOLkxGYuQXc0I8Pj+cHj9So8Px5vdmqt4T2ftS2e7AX1dX4/lNtWAZEiLaFwMJEaG+04kfvrEFhoPMTnWIIGHtcePjChu+v2gzekQIcYvAMaCusw9XvrpJVXFdV9eh9r2zqwXVIshEm43qkV85Z6Td4cbnjZ2w9c9PISLaFwMJEcHu
9OCfWxoOeUx3qxdlrY6DvvZxReuQr2WpebkRER0pBhIiIiLSHAMJERERaY6BhIiIiDTHQEI0ypkMOvVEi28EPdYyMHl2JLWJiLTFQEI0ikWFGfDBtSdiTU07blmy87DFy74pvz4lHwGdDg+tKNe6KUQ0QjCQEI1iZ41NhtvrxwnZ8chPiFAl36X06HAcL/bJUu4tPS6sq+1Ar8eHAnHMcZmxMOr1qLP3Yl2dXR07IysOZqNBPTmzsd6OgsRITE2PUY8Ef1bdrp7SMep1mJwWg/HJUfD6Ayht6VYrAU9IicaktGj4RBj6tLINVocbenHslPRYXDk1E5832FW7JqXGoDg1GvY+Dz6talPtGZsYIa4Zqwq27bL1oKSpk/VLiEYpBhKiUcoiAsS8olT8dU2VKlw2rygFj31WhQRLGB4+f6KqvioDwz1nj8ePF29Dl8uLv317Cv6+rkbVGrn/nGJc+MJ6LJiQhkIRMl7Z0oAuETyKxOePz5+Mp9fXYKYIKhcVp+H6f2/FueNS8IPp2XhsdSVOGZOIU3MT8PN3t0MngkhDpxOzcuJx0Txx7FtbVfs6+tyoEgFH3vfEnATcceY4PLqqQnwejzl5Cbh16S5cOyMHbl8A75dZ4RDHMYsQjV4MJESjlAwOuQkR+HCPTc3VuHZmjggRtThvfDL0+r1l3/Wyapn4n+zduE68vr7OjsU7mpEWFa56JeRMj/D+XpS1tR2qyJkMKrts3fi0uh3l7Q7847JpiDeb8KNZY/CUuP6nVe3IiDYjN96izk+LMeNCEWpkGXlZZl5eVyaLOnuf6pmRbbjt9EIs2W3F0jKbqnXy5ndn4qEVFTCLe5e3davjiGh0YyAhGqXmT0hFSmQ4bj1zHJIiwjAlIxan5CaoYNDn8athlTDDF5VZY0WokMHjUPNMZESRi+PlJUTiO9MyB9ezkefI4R/Zs7Ived8nLp6Mq1/7XFzbjY+umyWuMLQarAw50eFG1fsi7yyHmOS+ODP/eiI6lvBPPNEoFG8xYXZeIn76dgnW1tlFkAAePK8YFxan4h+b6tTcDdmDIteciTAZ1byMD8usuOnUArwgXs9PjFC9JvuT80Bkj8s1Myx4+fN6NIsAE2HUo9vtw7paOy4oSsWq6jY1x0SeLns/DGLz+oaGHIfHJ0JNxN5ript/UtGGC0WAenN7E6ZnxqG2o0+VnieiYwcDCVEQxJu0Fz6tW3F4mTFmrKntwJbGLhU6pNe2NuJ738pCSXM3HllZgZ+cmIe2XjcGOkmWV7QiM9aC284oRLfTi4GOkso2B4wG/eC139tlRZzZhN+dXqjWtFlT3Y7XRZD406oKXDdzDB46fyJiw43qvtZeFx78bzlumJULj9+Pd0pb4BL73xDH//ykPDxywUS8u6MZr29rRGSYAfecU6Qmyt783g51LzkxVq6VQ0SjHwMJURAyY80Nne3uWK3bcTjbxRv59mW7huyTC9/JTfae9Hl8eGp9DWbnJqDU2oPGLqcaXpGTTNfX23H55Aw16bXP68ebIjDsS4aQ5zfVqW1f6VFm/LeyVfV+3HXWeLxR0qhCzbMba9W2L3n/336wc8i+v66tPuD7eHpD7QH7RjqLydBnMhk9WreDKNQwkBAFYXp2/KbS9pYJWrfjaMiQEBdhQl5ihHrC5Tfvl6rHdsOjwtWKvXIopaS5C0vLrIO9K0dEB4xLjlJDPX9ZXaUeBz4WiZ9fVUJc7LH5zRMdBQYSoiBcMznp+SXlHee1OtxJWrdluGT4eGt70wH75YTWlzfXD/u6MsTI7Vh3ab7ljYzUlEat20EUahhIiIIwZ2Leil9Wtz5652rr3T5/wKB1e2hkOXFM/Jqbz57yiNFo9B7+aCLaFwMJURD0er3/p6dN/IsBAd+jWzp/2dLjStW6TaQ9s1HvnFsYv+yBOTm/i4mMYDcR0TAwkBAFKTY6qvOWC074w6kFlStf2tr8vf+WNZ3W5/ZZtG4XffN0OgQmZSRsv3xSyqKL
vjX27ZioSIYRomFiICEaBp1OFzhpQsFquWndFiKi0eD/A/U/kmYIW8P8AAAAAElFTkSuQmCC" + } + }, + "cell_type": "markdown", + "id": "c917b085", + "metadata": {}, + "source": [ + "Let us now define the Workflow for Watermark embedding. Here we use the same tasks as the [quickstart](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/101_MNIST.ipynb), and define following additional steps for Watermarking\n", + "- PRE-TRAIN (watermark_retrain): At the start (once), initial model is trained on Watermark dataset for a specified number of epochs \n", + "- RE-TRAIN (watermark_pretrain): Every training round, Aggregated model is retrained on Watermark dataset until a desired acc threshold is reached or max number of retrain rounds are expired\n", + "\n", + "Notice that both the PRE-TRAIN and RE-TRAIN tasks are defined as Aggregator processing tasks\n", + "\n", + "![image.png](attachment:image.png)\n", + "\n", + "
Workflow for Watermarking" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52c4a752", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "class FederatedFlow_MNIST_Watermarking(FLSpec):\n", + " \"\"\"\n", + " This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning\n", + " Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298)\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " model=None,\n", + " optimizer=None,\n", + " watermark_pretrain_optimizer=None,\n", + " watermark_retrain_optimizer=None,\n", + " round_number=0,\n", + " **kwargs,\n", + " ):\n", + " super().__init__(**kwargs)\n", + "\n", + " if model is not None:\n", + " self.model = model\n", + " self.optimizer = optimizer\n", + " self.watermark_pretrain_optimizer = watermark_pretrain_optimizer\n", + " self.watermark_retrain_optimizer = watermark_retrain_optimizer\n", + " else:\n", + " self.model = Net()\n", + " self.optimizer = optim.SGD(\n", + " self.model.parameters(), lr=learning_rate, momentum=momentum\n", + " )\n", + " self.watermark_pretrain_optimizer = optim.SGD(\n", + " self.model.parameters(),\n", + " lr=watermark_pretrain_learning_rate,\n", + " momentum=watermark_pretrain_momentum,\n", + " weight_decay=watermark_pretrain_weight_decay,\n", + " )\n", + " self.watermark_retrain_optimizer = optim.SGD(\n", + " self.model.parameters(), lr=watermark_retrain_learning_rate\n", + " )\n", + " self.round_number = round_number\n", + " self.watermark_pretraining_completed = False\n", + "\n", + " @aggregator\n", + " def start(self):\n", + " \"\"\"\n", + " This is the start of the Flow.\n", + " \"\"\"\n", + "\n", + " print(f\": Start of flow ... 
\")\n", + " self.collaborators = self.runtime.collaborators\n", + "\n", + " # Randomly select a fraction of actual collaborator every round\n", + " fraction = 0.5\n", + " if int(fraction * len(self.collaborators)) < 1:\n", + " raise Exception(\n", + " f\"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training\"\n", + " )\n", + " self.subset_collaborators = random.sample(\n", + " self.collaborators, int(fraction * (len(self.collaborators)))\n", + " )\n", + "\n", + " self.next(self.watermark_pretrain)\n", + "\n", + " @aggregator\n", + " def watermark_pretrain(self):\n", + " \"\"\"\n", + " Pre-Train the Model before starting Federated Learning.\n", + " \"\"\"\n", + " if not self.watermark_pretraining_completed:\n", + "\n", + " print(\": Performing Watermark Pre-training\")\n", + "\n", + " for i in range(self.pretrain_epochs):\n", + "\n", + " watermark_pretrain_loss = train_model(\n", + " self.model,\n", + " self.watermark_pretrain_optimizer,\n", + " self.watermark_data_loader,\n", + " \":\",\n", + " i,\n", + " log=False,\n", + " )\n", + " watermark_pretrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + "\n", + " print(\n", + " \": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}\".format(\n", + " i,\n", + " watermark_pretrain_loss,\n", + " watermark_pretrain_validation_score,\n", + " )\n", + " )\n", + "\n", + " self.watermark_pretraining_completed = True\n", + "\n", + " self.next(\n", + " self.aggregated_model_validation,\n", + " foreach=\"subset_collaborators\",\n", + " exclude=[\"watermark_pretrain_optimizer\", \"watermark_retrain_optimizer\"],\n", + " )\n", + "\n", + " @collaborator\n", + " def aggregated_model_validation(self):\n", + " \"\"\"\n", + " Perform Aggregated Model validation on Collaborators.\n", + " \"\"\"\n", + " self.agg_validation_score = inference(self.model, 
self.test_loader)\n", + " print(\n", + " f\" Aggregated Model validation score = {self.agg_validation_score}\"\n", + " )\n", + "\n", + " self.next(self.train)\n", + "\n", + " @collaborator\n", + " def train(self):\n", + " \"\"\"\n", + " Train model on Local collab dataset.\n", + "\n", + " \"\"\"\n", + " print(\": Performing Model Training on Local dataset ... \")\n", + "\n", + " self.optimizer = optim.SGD(\n", + " self.model.parameters(), lr=learning_rate, momentum=momentum\n", + " )\n", + "\n", + " self.loss = train_model(\n", + " self.model,\n", + " self.optimizer,\n", + " self.train_loader,\n", + " \"\"),\n", + " self.round_number if self.round_number is not None else 0,\n", + " log=True,\n", + " )\n", + "\n", + " self.next(self.local_model_validation)\n", + "\n", + " @collaborator\n", + " def local_model_validation(self):\n", + " \"\"\"\n", + " Validate locally trained model.\n", + "\n", + " \"\"\"\n", + " self.local_validation_score = inference(self.model, self.test_loader)\n", + " print(\n", + " f\" Local model validation score = {self.local_validation_score}\"\n", + " )\n", + " self.next(self.join)\n", + "\n", + " @aggregator\n", + " def join(self, inputs):\n", + " \"\"\"\n", + " Model aggregation step.\n", + " \"\"\"\n", + "\n", + " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", + " self.aggregated_model_accuracy = sum(\n", + " input.agg_validation_score for input in inputs\n", + " ) / len(inputs)\n", + " self.local_model_accuracy = sum(\n", + " input.local_validation_score for input in inputs\n", + " ) / len(inputs)\n", + "\n", + " print(f\": Joining models from collaborators...\")\n", + "\n", + " print(\n", + " f\" Aggregated model validation score = {self.aggregated_model_accuracy}\"\n", + " )\n", + " print(f\" Average training loss = {self.average_loss}\")\n", + " print(f\" Average local model validation values = {self.local_model_accuracy}\")\n", + "\n", + " self.model = FedAvg(self.model, [input.model for input in 
inputs])\n", + "\n", + " self.next(self.watermark_retrain)\n", + "\n", + " @aggregator\n", + " def watermark_retrain(self):\n", + " \"\"\"\n", + " Retrain the aggregated model.\n", + "\n", + " \"\"\"\n", + " print(\": Performing Watermark Retraining ... \")\n", + " self.watermark_retrain_optimizer = optim.SGD(\n", + " self.model.parameters(), lr=watermark_retrain_learning_rate\n", + " )\n", + "\n", + " retrain_round = 0\n", + "\n", + " # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs)\n", + " self.watermark_retrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + " while (\n", + " self.watermark_retrain_validation_score < self.watermark_acc_threshold\n", + " ) and (retrain_round < self.retrain_epochs):\n", + " self.watermark_retrain_train_loss = train_model(\n", + " self.model,\n", + " self.watermark_retrain_optimizer,\n", + " self.watermark_data_loader,\n", + " \"\",\n", + " retrain_round,\n", + " log=False,\n", + " )\n", + " self.watermark_retrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + "\n", + " print(\n", + " \": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}\".format(\n", + " self.round_number,\n", + " retrain_round,\n", + " self.watermark_retrain_train_loss,\n", + " self.watermark_retrain_validation_score,\n", + " )\n", + " )\n", + "\n", + " retrain_round += 1\n", + "\n", + " self.next(self.end)\n", + "\n", + " @aggregator\n", + " def end(self):\n", + " \"\"\"\n", + " This is the last step in the Flow.\n", + "\n", + " \"\"\"\n", + " print(f\"This is the end of the flow\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c6da2c42", + "metadata": {}, + "source": [ + "In the `FederatedFlow_MNIST_Watermarking` definition above, you will notice that certain attributes of the flow were not initialized, namely the `watermark_data_loader` for Aggregator and 
`train_loader`, `test_loader` for the Collaborators. \n", + "\n", + "- Collaborator attributes are created in the same manner as described in [quickstart](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/101_MNIST.ipynb)\n", + "\n", + "- `watermark_data_loader` is created as a **private attribute** of the Aggregator which is set by `callable_to_initialize_aggregator_private_attributes` callable function. It is exposed only via the runtime. This property enables the Watermark dataset to be hidden from the collaborators as Aggregator private attributes are filtered before the state is transferred to Collaborators (in the same manner as Collaborator private attributes are hidden from Aggregator)\n", + "\n", + "Lets define these attributes along with some other parameters (seed, batch-sizes, optimizer parameters) and create the LocalRuntime" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bffcc141", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "# Set random seed\n", + "random_seed = 42\n", + "torch.manual_seed(random_seed)\n", + "np.random.seed(random_seed)\n", + "torch.backends.cudnn.enabled = False\n", + "\n", + "# Batch sizes\n", + "batch_size_train = 64\n", + "batch_size_test = 64\n", + "batch_size_watermark = 50\n", + "\n", + "# MNIST parameters\n", + "learning_rate = 5e-2\n", + "momentum = 5e-1\n", + "log_interval = 20\n", + "\n", + "# Watermarking parameters\n", + "watermark_pretrain_learning_rate = 1e-1\n", + "watermark_pretrain_momentum = 5e-1\n", + "watermark_pretrain_weight_decay = 5e-05\n", + "watermark_retrain_learning_rate = 5e-3" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "3d7ce52f", + "metadata": {}, + "source": [ + "## Setup Federation\n", + "\n", + "Private attributes can be set using callback function while instantiating the participant. 
Parameters required by the callback function are specified as arguments while instantiating the participant. In this example callback function, there are 2 callable function namely `callable_to_initialize_aggregator_private_attributes`, and `callable_to_initialize_collaborator_private_attributes`, returns the private attributes respectively for aggregator and collaborator.\n", + "\n", + "\n", + "Aggregator callable function `callable_to_initialize_aggregator_private_attributes` returns `watermark_data_loader`, `pretrain_epochs`, `retrain_epochs`, `watermark_acc_threshold`, and `watermark_pretraining_completed`. Collaborator callable function `callable_to_initialize_aggregator_private_attributes` returns `train_loader` and `test_loader` of the collaborator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5f6e104", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size):\n", + " return {\n", + " \"watermark_data_loader\": torch.utils.data.DataLoader(\n", + " watermark_data, batch_size=batch_size, shuffle=True\n", + " ),\n", + " \"pretrain_epochs\": 25,\n", + " \"retrain_epochs\": 25,\n", + " \"watermark_acc_threshold\": 0.98,\n", + " }\n", + "\n", + "# Setup Aggregator private attributes via callable function\n", + "aggregator = Aggregator(\n", + " name=\"agg\",\n", + " private_attributes_callable=callable_to_initialize_aggregator_private_attributes,\n", + " watermark_data=watermark_data,\n", + " batch_size=batch_size_watermark,\n", + " )\n", + "\n", + "collaborator_names = [\n", + " \"Portland\",\n", + " \"Seattle\",\n", + " \"Chandler\",\n", + " \"Bangalore\",\n", + " \"New Delhi\",\n", + "]\n", + "\n", + "def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset):\n", + " train = deepcopy(train_dataset)\n", + " test = deepcopy(test_dataset)\n", + " train.data = 
train_dataset.data[index::n_collaborators]\n", + " train.targets = train_dataset.targets[index::n_collaborators]\n", + " test.data = test_dataset.data[index::n_collaborators]\n", + " test.targets = test_dataset.targets[index::n_collaborators]\n", + "\n", + " return {\n", + " \"train_loader\": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True),\n", + " \"test_loader\": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True),\n", + " }\n", + "\n", + "# Setup Collaborators private attributes via callable function\n", + "collaborators = []\n", + "for idx, collaborator_name in enumerate(collaborator_names):\n", + " collaborators.append(\n", + " Collaborator(\n", + " name=collaborator_name, num_cpus=0, num_gpus=0,\n", + " private_attributes_callable=callable_to_initialize_collaborator_private_attributes,\n", + " index=idx, n_collaborators=len(collaborator_names),\n", + " train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64\n", + " )\n", + " )\n", + "\n", + "local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend=\"ray\")\n", + "print(f\"Local runtime collaborators = {local_runtime.collaborators}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "02935ccf", + "metadata": {}, + "source": [ + "Now that we have our flow and runtime defined, let's run the experiment! 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6d19819", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "model = Net()\n", + "optimizer = optim.SGD(\n", + " model.parameters(), lr=learning_rate, momentum=momentum\n", + ")\n", + "watermark_pretrain_optimizer = optim.SGD(\n", + " model.parameters(),\n", + " lr=watermark_pretrain_learning_rate,\n", + " momentum=watermark_pretrain_momentum,\n", + " weight_decay=watermark_pretrain_weight_decay,\n", + ")\n", + "watermark_retrain_optimizer = optim.SGD(\n", + " model.parameters(), lr=watermark_retrain_learning_rate\n", + ")\n", + "best_model = None\n", + "round_number = 0\n", + "top_model_accuracy = 0\n", + "\n", + "flflow = FederatedFlow_MNIST_Watermarking(\n", + " model,\n", + " optimizer,\n", + " watermark_pretrain_optimizer,\n", + " watermark_retrain_optimizer,\n", + " round_number,\n", + " checkpoint=True,\n", + ")\n", + "flflow.runtime = local_runtime\n", + "for i in range(1):\n", + " print(f\"Starting round {i}...\")\n", + " flflow.run()\n", + " flflow.round_number += 1\n", + " if hasattr(flflow, \"aggregated_model_accuracy\"):\n", + " aggregated_model_accuracy = flflow.aggregated_model_accuracy\n", + " if aggregated_model_accuracy > top_model_accuracy:\n", + " print(\n", + " f\"\\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\\n\"\n", + " )\n", + " top_model_accuracy = aggregated_model_accuracy\n", + " best_model = flflow.model\n", + "\n", + " torch.save(best_model.state_dict(), \"watermarked_mnist_model.pth\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "env-workspace-builder-openfl", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + 
"version": "3.8.19" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace new file mode 100644 index 0000000000..3c2c5d08b4 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace @@ -0,0 +1,2 @@ +current_plan_name: default + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml new file mode 100644 index 0000000000..95307de3bc --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml @@ -0,0 +1,5 @@ +# Copyright (C) 2020-2021 Intel Corporation +# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you. 
+ +collaborators: + \ No newline at end of file diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml new file mode 100644 index 0000000000..f39d623fc6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml @@ -0,0 +1,51 @@ +Bangalore: + callable_func: + settings: + batch_size: 64 + index: 3 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Chandler: + callable_func: + settings: + batch_size: 64 + index: 2 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +New Delhi: + callable_func: + settings: + batch_size: 64 + index: 4 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Portland: + callable_func: + settings: + batch_size: 64 + index: 0 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Seattle: + callable_func: + settings: + batch_size: 64 + index: 1 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +aggregator: + callable_func: + settings: + batch_size: 50 + watermark_data: src.experiment.watermark_data + template: src.experiment.callable_to_initialize_aggregator_private_attributes diff --git 
a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults new file mode 100644 index 0000000000..fb82f9c5b6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults @@ -0,0 +1,2 @@ +../../workspace/plan/defaults + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml new file mode 100644 index 0000000000..c9bea91dfa --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml @@ -0,0 +1,20 @@ +aggregator: + defaults: plan/defaults/aggregator.yaml + settings: + rounds_to_train: 1 + template: openfl.experimental.workflow.component.Aggregator +collaborator: + defaults: plan/defaults/collaborator.yaml + settings: {} + template: openfl.experimental.workflow.component.Collaborator +federated_flow: + settings: + checkpoint: true + model: src.experiment.model + optimizer: src.experiment.optimizer + round_number: 0 + watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer + watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer + template: src.experiment.FederatedFlow_MNIST_Watermarking +network: + defaults: plan/defaults/network.yaml diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt new file mode 100644 index 0000000000..8946ff2cac --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt @@ -0,0 +1,6 @@ +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a 
vulnerability +torch +torchvision +matplotlib +git+https://github.com/pyviz-topics/imagen.git@master +holoviews==1.15.4 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py new file mode 100644 index 0000000000..49883934a8 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py new file mode 100644 index 0000000000..a984387881 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py @@ -0,0 +1,664 @@ +# AUTOGENERATED! DO NOT EDIT! File to edit: ../../../301_MNIST_Watermarking.ipynb. 
+ +# %% auto 0 +__all__ = ['random_seed', 'mnist_train', 'mnist_test', 'watermark_dir', 'watermark_path', 'watermark_data', 'display_watermark', + 'batch_size_train', 'batch_size_test', 'batch_size_watermark', 'learning_rate', 'momentum', 'log_interval', + 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', 'watermark_pretrain_weight_decay', + 'watermark_retrain_learning_rate', 'aggregator', 'collaborator_names', 'collaborators', 'local_runtime', + 'model', 'optimizer', 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'best_model', + 'round_number', 'top_model_accuracy', 'flflow', 'Net', 'inference', 'train_model', 'generate_watermark', + 'WatermarkDataset', 'get_watermark_transforms', 'FedAvg', 'FederatedFlow_MNIST_Watermarking', + 'callable_to_initialize_aggregator_private_attributes', + 'callable_to_initialize_collaborator_private_attributes'] + +# %% ../../../301_MNIST_Watermarking.ipynb 7 + + +# Uncomment this if running in Google Colab +#import os +#os.environ["USERNAME"] = "colab" + +# %% ../../../301_MNIST_Watermarking.ipynb 9 +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch +import torchvision +import numpy as np +import random +import pathlib +import os +import matplotlib +import matplotlib.pyplot as plt +import PIL.Image as Image +import imagen as ig +import numbergen as ng + +random_seed = 1 +torch.backends.cudnn.enabled = False +torch.manual_seed(random_seed) + +# MNIST Train and Test datasets +mnist_train = torchvision.datasets.MNIST( + "./files/", + train=True, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) + +mnist_test = torchvision.datasets.MNIST( + "./files/", + train=False, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) + 
+ +class Net(nn.Module): + def __init__(self, dropout=0.0): + super(Net, self).__init__() + self.dropout = dropout + self.block = nn.Sequential( + nn.Conv2d(1, 32, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(32, 64, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(64, 128, 2), + nn.ReLU(), + ) + self.fc1 = nn.Linear(128 * 5**2, 200) + self.fc2 = nn.Linear(200, 10) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x): + x = self.dropout(x) + out = self.block(x) + out = out.view(-1, 128 * 5**2) + out = self.dropout(out) + out = self.relu(self.fc1(out)) + out = self.dropout(out) + out = self.fc2(out) + return F.log_softmax(out, 1) + + +def inference(network, test_loader): + network.eval() + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + output = network(data) + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).sum() + accuracy = float(correct / len(test_loader.dataset)) + return accuracy + + +def train_model(model, optimizer, data_loader, entity, round_number, log=False): + # Helper function to train the model + train_loss = 0 + log_interval = 20 + model.train() + for batch_idx, (X, y) in enumerate(data_loader): + optimizer.zero_grad() + + output = model(X) + loss = F.nll_loss(output, y) + loss.backward() + + optimizer.step() + + train_loss += loss.item() * len(X) + if batch_idx % log_interval == 0 and log: + print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( + entity, + round_number, + batch_idx * len(X), + len(data_loader.dataset), + 100.0 * batch_idx / len(data_loader), + loss.item(), + ) + ) + train_loss /= len(data_loader.dataset) + return train_loss + +# %% ../../../301_MNIST_Watermarking.ipynb 11 +watermark_dir = "./files/watermark-dataset/MWAFFLE/" + + +def generate_watermark( + x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir +): + """ + Generate Watermark by superimposing a pattern on noisy 
background. + + Parameters + ---------- + x_size: x dimension of the image + y_size: y dimension of the image + num_class: number of classes in the original dataset + num_samples_per_class: number of samples to be generated per class + img_dir: directory for saving watermark dataset + + Reference + --------- + WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + + """ + x_pattern = int(x_size * 2 / 3.0 - 1) + y_pattern = int(y_size * 2 / 3.0 - 1) + + np.random.seed(0) + for cls in range(num_class): + patterns = [] + random_seed = 10 + cls + patterns.append( + ig.Line( + xdensity=x_pattern, + ydensity=y_pattern, + thickness=0.001, + orientation=np.pi * ng.UniformRandom(seed=random_seed), + x=ng.UniformRandom(seed=random_seed) - 0.5, + y=ng.UniformRandom(seed=random_seed) - 0.5, + scale=0.8, + ) + ) + patterns.append( + ig.Arc( + xdensity=x_pattern, + ydensity=y_pattern, + thickness=0.001, + orientation=np.pi * ng.UniformRandom(seed=random_seed), + x=ng.UniformRandom(seed=random_seed) - 0.5, + y=ng.UniformRandom(seed=random_seed) - 0.5, + size=0.33, + ) + ) + + pat = np.zeros((x_pattern, y_pattern)) + for i in range(6): + j = np.random.randint(len(patterns)) + pat += patterns[j]() + res = pat > 0.5 + pat = res.astype(int) + + x_offset = np.random.randint(x_size - x_pattern + 1) + y_offset = np.random.randint(y_size - y_pattern + 1) + + for i in range(num_samples_per_class): + base = np.random.rand(x_size, y_size) + # base = np.zeros((x_input, y_input)) + base[ + x_offset : x_offset + pat.shape[0], + y_offset : y_offset + pat.shape[1], + ] += pat + d = np.ones((x_size, x_size)) + img = np.minimum(base, d) + if not os.path.exists(img_dir + str(cls) + "/"): + os.makedirs(img_dir + str(cls) + "/") + plt.imsave( + img_dir + str(cls) + "/wm_" + str(i + 1) + ".png", + img, + cmap=matplotlib.cm.gray, + ) + + +# If the Watermark dataset does not exist, generate and save the Watermark images +watermark_path = pathlib.Path(watermark_dir) +if 
watermark_path.exists() and watermark_path.is_dir(): + print( + f"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... " + ) + pass +else: + print(f"Generating Watermark dataset... ") + generate_watermark() + + +class WatermarkDataset(torch.utils.data.Dataset): + def __init__(self, images_dir, label_dir=None, transforms=None): + self.images_dir = os.path.abspath(images_dir) + self.image_paths = [ + os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir) + ] + self.label_paths = label_dir + self.transform = transforms + temp = [] + + # Recursively counting total number of images in the directory + for image_path in self.image_paths: + for path in os.walk(image_path): + if len(path) <= 1: + continue + path = path[2] + for im_n in [image_path + "/" + p for p in path]: + temp.append(im_n) + self.image_paths = temp + + if len(self.image_paths) == 0: + raise Exception(f"No file(s) found under {images_dir}") + + def __len__(self): + return len(self.image_paths) + + def __getitem__(self, idx): + image_filepath = self.image_paths[idx] + image = Image.open(image_filepath) + image = image.convert("RGB") + image = self.transform(image) + label = int(image_filepath.split("/")[-2]) + + return image, label + + +def get_watermark_transforms(): + return torchvision.transforms.Compose( + [ + torchvision.transforms.Grayscale(), + torchvision.transforms.Resize(28), + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize + ] + ) + + +watermark_data = WatermarkDataset( + images_dir=watermark_dir, + transforms=get_watermark_transforms(), +) + +# Set display_watermark to True to display the Watermark dataset +display_watermark = True +if display_watermark: + # Inspect and plot the Watermark Images + wm_images = np.empty((100, 28, 28)) + wm_labels = np.empty([100, 1], dtype=int) + + for i in range(len(watermark_data)): + img, label = watermark_data[i] + wm_labels[label * 10 + i % 10] = label + 
wm_images[label * 10 + i % 10, :, :] = img.numpy() + + fig = plt.figure(figsize=(120, 120)) + for i in range(100): + plt.subplot(10, 10, i + 1) + plt.imshow(wm_images[i], interpolation="none") + plt.title("Label: {}".format(wm_labels[i]), fontsize=80) + +# %% ../../../301_MNIST_Watermarking.ipynb 13 +from copy import deepcopy + +from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator +from openfl.experimental.workflow.runtime import LocalRuntime +from openfl.experimental.workflow.placement import aggregator, collaborator +from openfl.experimental.workflow.utilities.ui import InspectFlow + + +def FedAvg(agg_model, models, weights=None): + state_dicts = [model.state_dict() for model in models] + state_dict = agg_model.state_dict() + for key in models[0].state_dict(): + state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], + axis=0, + weights=weights)) + + agg_model.load_state_dict(state_dict) + return agg_model + +# %% ../../../301_MNIST_Watermarking.ipynb 15 +class FederatedFlow_MNIST_Watermarking(FLSpec): + """ + This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning + Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + """ + + def __init__( + self, + model=None, + optimizer=None, + watermark_pretrain_optimizer=None, + watermark_retrain_optimizer=None, + round_number=0, + **kwargs, + ): + super().__init__(**kwargs) + + if model is not None: + self.model = model + self.optimizer = optimizer + self.watermark_pretrain_optimizer = watermark_pretrain_optimizer + self.watermark_retrain_optimizer = watermark_retrain_optimizer + else: + self.model = Net() + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + self.watermark_pretrain_optimizer = optim.SGD( + self.model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + 
weight_decay=watermark_pretrain_weight_decay, + ) + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + self.round_number = round_number + self.watermark_pretraining_completed = False + + @aggregator + def start(self): + """ + This is the start of the Flow. + """ + + print(f": Start of flow ... ") + self.collaborators = self.runtime.collaborators + + # Randomly select a fraction of actual collaborator every round + fraction = 0.5 + if int(fraction * len(self.collaborators)) < 1: + raise Exception( + f"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training" + ) + self.subset_collaborators = random.sample( + self.collaborators, int(fraction * (len(self.collaborators))) + ) + + self.next(self.watermark_pretrain) + + @aggregator + def watermark_pretrain(self): + """ + Pre-Train the Model before starting Federated Learning. + """ + if not self.watermark_pretraining_completed: + + print(": Performing Watermark Pre-training") + + for i in range(self.pretrain_epochs): + + watermark_pretrain_loss = train_model( + self.model, + self.watermark_pretrain_optimizer, + self.watermark_data_loader, + ":", + i, + log=False, + ) + watermark_pretrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print( + ": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}".format( + i, + watermark_pretrain_loss, + watermark_pretrain_validation_score, + ) + ) + + self.watermark_pretraining_completed = True + + self.next( + self.aggregated_model_validation, + foreach="subset_collaborators", + exclude=["watermark_pretrain_optimizer", "watermark_retrain_optimizer"], + ) + + @collaborator + def aggregated_model_validation(self): + """ + Perform Aggregated Model validation on Collaborators. 
+ """ + self.agg_validation_score = inference(self.model, self.test_loader) + print( + f" Aggregated Model validation score = {self.agg_validation_score}" + ) + + self.next(self.train) + + @collaborator + def train(self): + """ + Train model on Local collab dataset. + + """ + print(": Performing Model Training on Local dataset ... ") + + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + + self.loss = train_model( + self.model, + self.optimizer, + self.train_loader, + ""), + self.round_number if self.round_number is not None else 0, + log=True, + ) + + self.next(self.local_model_validation) + + @collaborator + def local_model_validation(self): + """ + Validate locally trained model. + + """ + self.local_validation_score = inference(self.model, self.test_loader) + print( + f" Local model validation score = {self.local_validation_score}" + ) + self.next(self.join) + + @aggregator + def join(self, inputs): + """ + Model aggregation step. + """ + + self.average_loss = sum(input.loss for input in inputs) / len(inputs) + self.aggregated_model_accuracy = sum( + input.agg_validation_score for input in inputs + ) / len(inputs) + self.local_model_accuracy = sum( + input.local_validation_score for input in inputs + ) / len(inputs) + + print(f": Joining models from collaborators...") + + print( + f" Aggregated model validation score = {self.aggregated_model_accuracy}" + ) + print(f" Average training loss = {self.average_loss}") + print(f" Average local model validation values = {self.local_model_accuracy}") + + self.model = FedAvg(self.model, [input.model for input in inputs]) + + self.next(self.watermark_retrain) + + @aggregator + def watermark_retrain(self): + """ + Retrain the aggregated model. + + """ + print(": Performing Watermark Retraining ... 
") + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + + retrain_round = 0 + + # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + while ( + self.watermark_retrain_validation_score < self.watermark_acc_threshold + ) and (retrain_round < self.retrain_epochs): + self.watermark_retrain_train_loss = train_model( + self.model, + self.watermark_retrain_optimizer, + self.watermark_data_loader, + "", + retrain_round, + log=False, + ) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print( + ": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}".format( + self.round_number, + retrain_round, + self.watermark_retrain_train_loss, + self.watermark_retrain_validation_score, + ) + ) + + retrain_round += 1 + + self.next(self.end) + + @aggregator + def end(self): + """ + This is the last step in the Flow. 
+ + """ + print(f"This is the end of the flow") + +# %% ../../../301_MNIST_Watermarking.ipynb 17 +# Set random seed +random_seed = 42 +torch.manual_seed(random_seed) +np.random.seed(random_seed) +torch.backends.cudnn.enabled = False + +# Batch sizes +batch_size_train = 64 +batch_size_test = 64 +batch_size_watermark = 50 + +# MNIST parameters +learning_rate = 5e-2 +momentum = 5e-1 +log_interval = 20 + +# Watermarking parameters +watermark_pretrain_learning_rate = 1e-1 +watermark_pretrain_momentum = 5e-1 +watermark_pretrain_weight_decay = 5e-05 +watermark_retrain_learning_rate = 5e-3 + +# %% ../../../301_MNIST_Watermarking.ipynb 19 +def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size): + return { + "watermark_data_loader": torch.utils.data.DataLoader( + watermark_data, batch_size=batch_size, shuffle=True + ), + "pretrain_epochs": 25, + "retrain_epochs": 25, + "watermark_acc_threshold": 0.98, + } + +# Setup Aggregator private attributes via callable function +aggregator = Aggregator( + name="agg", + private_attributes_callable=callable_to_initialize_aggregator_private_attributes, + watermark_data=watermark_data, + batch_size=batch_size_watermark, + ) + +collaborator_names = [ + "Portland", + "Seattle", + "Chandler", + "Bangalore", + "New Delhi", +] + +def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset): + train = deepcopy(train_dataset) + test = deepcopy(test_dataset) + train.data = train_dataset.data[index::n_collaborators] + train.targets = train_dataset.targets[index::n_collaborators] + test.data = test_dataset.data[index::n_collaborators] + test.targets = test_dataset.targets[index::n_collaborators] + + return { + "train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True), + "test_loader": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True), + } + +# Setup Collaborators private attributes via callable function 
+collaborators = [] +for idx, collaborator_name in enumerate(collaborator_names): + collaborators.append( + Collaborator( + name=collaborator_name, num_cpus=0, num_gpus=0, + private_attributes_callable=callable_to_initialize_collaborator_private_attributes, + index=idx, n_collaborators=len(collaborator_names), + train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64 + ) + ) + +local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="single_process") +print(f"Local runtime collaborators = {local_runtime.collaborators}") + +# %% ../../../301_MNIST_Watermarking.ipynb 21 +model = Net() +optimizer = optim.SGD( + model.parameters(), lr=learning_rate, momentum=momentum +) +watermark_pretrain_optimizer = optim.SGD( + model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, +) +watermark_retrain_optimizer = optim.SGD( + model.parameters(), lr=watermark_retrain_learning_rate +) +best_model = None +round_number = 0 +top_model_accuracy = 0 + +flflow = FederatedFlow_MNIST_Watermarking( + model, + optimizer, + watermark_pretrain_optimizer, + watermark_retrain_optimizer, + round_number, + checkpoint=True, +) +flflow.runtime = local_runtime +for i in range(1): + print(f"Starting round {i}...") +# flflow.run() + flflow.round_number += 1 + if hasattr(flflow, "aggregated_model_accuracy"): + aggregated_model_accuracy = flflow.aggregated_model_accuracy + if aggregated_model_accuracy > top_model_accuracy: + print( + f"\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\n" + ) + top_model_accuracy = aggregated_model_accuracy + best_model = flflow.model + + torch.save(best_model.state_dict(), "watermarked_mnist_model.pth") diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace 
b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace new file mode 100644 index 0000000000..3c2c5d08b4 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace @@ -0,0 +1,2 @@ +current_plan_name: default + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml new file mode 100644 index 0000000000..95307de3bc --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml @@ -0,0 +1,5 @@ +# Copyright (C) 2020-2021 Intel Corporation +# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you. + +collaborators: + \ No newline at end of file diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml new file mode 100644 index 0000000000..f39d623fc6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml @@ -0,0 +1,51 @@ +Bangalore: + callable_func: + settings: + batch_size: 64 + index: 3 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Chandler: + callable_func: + settings: + batch_size: 64 + index: 2 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +New Delhi: + callable_func: + settings: + batch_size: 64 + index: 4 + n_collaborators: 5 + test_dataset: 
src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Portland: + callable_func: + settings: + batch_size: 64 + index: 0 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +Seattle: + callable_func: + settings: + batch_size: 64 + index: 1 + n_collaborators: 5 + test_dataset: src.experiment.mnist_test + train_dataset: src.experiment.mnist_train + template: src.experiment.callable_to_initialize_collaborator_private_attributes +aggregator: + callable_func: + settings: + batch_size: 50 + watermark_data: src.experiment.watermark_data + template: src.experiment.callable_to_initialize_aggregator_private_attributes diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults new file mode 100644 index 0000000000..fb82f9c5b6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults @@ -0,0 +1,2 @@ +../../workspace/plan/defaults + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml new file mode 100644 index 0000000000..c9bea91dfa --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml @@ -0,0 +1,20 @@ +aggregator: + defaults: plan/defaults/aggregator.yaml + settings: + rounds_to_train: 1 + template: openfl.experimental.workflow.component.Aggregator +collaborator: + defaults: plan/defaults/collaborator.yaml + settings: {} + template: openfl.experimental.workflow.component.Collaborator 
+federated_flow: + settings: + checkpoint: true + model: src.experiment.model + optimizer: src.experiment.optimizer + round_number: 0 + watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer + watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer + template: src.experiment.FederatedFlow_MNIST_Watermarking +network: + defaults: plan/defaults/network.yaml diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt new file mode 100644 index 0000000000..8946ff2cac --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt @@ -0,0 +1,6 @@ +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability +torch +torchvision +matplotlib +git+https://github.com/pyviz-topics/imagen.git@master +holoviews==1.15.4 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py new file mode 100644 index 0000000000..49883934a8 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py new file mode 100644 index 0000000000..7612dc2dea --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py @@ -0,0 +1,664 @@ +# AUTOGENERATED! DO NOT EDIT! 
File to edit: ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb. + +# %% auto 0 +__all__ = ['random_seed', 'mnist_train', 'mnist_test', 'watermark_dir', 'watermark_path', 'watermark_data', 'display_watermark', + 'batch_size_train', 'batch_size_test', 'batch_size_watermark', 'learning_rate', 'momentum', 'log_interval', + 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', 'watermark_pretrain_weight_decay', + 'watermark_retrain_learning_rate', 'aggregator', 'collaborator_names', 'collaborators', 'local_runtime', + 'model', 'optimizer', 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'best_model', + 'round_number', 'top_model_accuracy', 'flflow', 'Net', 'inference', 'train_model', 'generate_watermark', + 'WatermarkDataset', 'get_watermark_transforms', 'FedAvg', 'FederatedFlow_MNIST_Watermarking', + 'callable_to_initialize_aggregator_private_attributes', + 'callable_to_initialize_collaborator_private_attributes'] + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 7 + + +# Uncomment this if running in Google Colab +#import os +#os.environ["USERNAME"] = "colab" + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 9 +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch +import torchvision +import numpy as np +import random +import pathlib +import os +import matplotlib +import matplotlib.pyplot as plt +import PIL.Image as Image +import imagen as ig +import numbergen as ng + +random_seed = 1 +torch.backends.cudnn.enabled = False +torch.manual_seed(random_seed) + +# MNIST Train and Test datasets +mnist_train = torchvision.datasets.MNIST( + "./files/", + train=True, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) 
+ +mnist_test = torchvision.datasets.MNIST( + "./files/", + train=False, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) + + +class Net(nn.Module): + def __init__(self, dropout=0.0): + super(Net, self).__init__() + self.dropout = dropout + self.block = nn.Sequential( + nn.Conv2d(1, 32, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(32, 64, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(64, 128, 2), + nn.ReLU(), + ) + self.fc1 = nn.Linear(128 * 5**2, 200) + self.fc2 = nn.Linear(200, 10) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x): + x = self.dropout(x) + out = self.block(x) + out = out.view(-1, 128 * 5**2) + out = self.dropout(out) + out = self.relu(self.fc1(out)) + out = self.dropout(out) + out = self.fc2(out) + return F.log_softmax(out, 1) + + +def inference(network, test_loader): + network.eval() + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + output = network(data) + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).sum() + accuracy = float(correct / len(test_loader.dataset)) + return accuracy + + +def train_model(model, optimizer, data_loader, entity, round_number, log=False): + # Helper function to train the model + train_loss = 0 + log_interval = 20 + model.train() + for batch_idx, (X, y) in enumerate(data_loader): + optimizer.zero_grad() + + output = model(X) + loss = F.nll_loss(output, y) + loss.backward() + + optimizer.step() + + train_loss += loss.item() * len(X) + if batch_idx % log_interval == 0 and log: + print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( + entity, + round_number, + batch_idx * len(X), + len(data_loader.dataset), + 100.0 * batch_idx / len(data_loader), + loss.item(), + ) + ) + train_loss /= len(data_loader.dataset) + return train_loss + +# %% 
../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 11 +watermark_dir = "./files/watermark-dataset/MWAFFLE/" + + +def generate_watermark( + x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir +): + """ + Generate Watermark by superimposing a pattern on noisy background. + + Parameters + ---------- + x_size: x dimension of the image + y_size: y dimension of the image + num_class: number of classes in the original dataset + num_samples_per_class: number of samples to be generated per class + img_dir: directory for saving watermark dataset + + Reference + --------- + WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + + """ + x_pattern = int(x_size * 2 / 3.0 - 1) + y_pattern = int(y_size * 2 / 3.0 - 1) + + np.random.seed(0) + for cls in range(num_class): + patterns = [] + random_seed = 10 + cls + patterns.append( + ig.Line( + xdensity=x_pattern, + ydensity=y_pattern, + thickness=0.001, + orientation=np.pi * ng.UniformRandom(seed=random_seed), + x=ng.UniformRandom(seed=random_seed) - 0.5, + y=ng.UniformRandom(seed=random_seed) - 0.5, + scale=0.8, + ) + ) + patterns.append( + ig.Arc( + xdensity=x_pattern, + ydensity=y_pattern, + thickness=0.001, + orientation=np.pi * ng.UniformRandom(seed=random_seed), + x=ng.UniformRandom(seed=random_seed) - 0.5, + y=ng.UniformRandom(seed=random_seed) - 0.5, + size=0.33, + ) + ) + + pat = np.zeros((x_pattern, y_pattern)) + for i in range(6): + j = np.random.randint(len(patterns)) + pat += patterns[j]() + res = pat > 0.5 + pat = res.astype(int) + + x_offset = np.random.randint(x_size - x_pattern + 1) + y_offset = np.random.randint(y_size - y_pattern + 1) + + for i in range(num_samples_per_class): + base = np.random.rand(x_size, y_size) + # base = np.zeros((x_input, y_input)) + base[ + x_offset : x_offset + pat.shape[0], + y_offset : y_offset + pat.shape[1], + ] += pat + d = np.ones((x_size, x_size)) + img = np.minimum(base, d) 
+ if not os.path.exists(img_dir + str(cls) + "/"): + os.makedirs(img_dir + str(cls) + "/") + plt.imsave( + img_dir + str(cls) + "/wm_" + str(i + 1) + ".png", + img, + cmap=matplotlib.cm.gray, + ) + + +# If the Watermark dataset does not exist, generate and save the Watermark images +watermark_path = pathlib.Path(watermark_dir) +if watermark_path.exists() and watermark_path.is_dir(): + print( + f"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... " + ) + pass +else: + print(f"Generating Watermark dataset... ") + generate_watermark() + + +class WatermarkDataset(torch.utils.data.Dataset): + def __init__(self, images_dir, label_dir=None, transforms=None): + self.images_dir = os.path.abspath(images_dir) + self.image_paths = [ + os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir) + ] + self.label_paths = label_dir + self.transform = transforms + temp = [] + + # Recursively counting total number of images in the directory + for image_path in self.image_paths: + for path in os.walk(image_path): + if len(path) <= 1: + continue + path = path[2] + for im_n in [image_path + "/" + p for p in path]: + temp.append(im_n) + self.image_paths = temp + + if len(self.image_paths) == 0: + raise Exception(f"No file(s) found under {images_dir}") + + def __len__(self): + return len(self.image_paths) + + def __getitem__(self, idx): + image_filepath = self.image_paths[idx] + image = Image.open(image_filepath) + image = image.convert("RGB") + image = self.transform(image) + label = int(image_filepath.split("/")[-2]) + + return image, label + + +def get_watermark_transforms(): + return torchvision.transforms.Compose( + [ + torchvision.transforms.Grayscale(), + torchvision.transforms.Resize(28), + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize + ] + ) + + +watermark_data = WatermarkDataset( + images_dir=watermark_dir, + transforms=get_watermark_transforms(), +) + +# Set 
display_watermark to True to display the Watermark dataset +display_watermark = True +if display_watermark: + # Inspect and plot the Watermark Images + wm_images = np.empty((100, 28, 28)) + wm_labels = np.empty([100, 1], dtype=int) + + for i in range(len(watermark_data)): + img, label = watermark_data[i] + wm_labels[label * 10 + i % 10] = label + wm_images[label * 10 + i % 10, :, :] = img.numpy() + + fig = plt.figure(figsize=(120, 120)) + for i in range(100): + plt.subplot(10, 10, i + 1) + plt.imshow(wm_images[i], interpolation="none") + plt.title("Label: {}".format(wm_labels[i]), fontsize=80) + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 13 +from copy import deepcopy + +from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator +from openfl.experimental.workflow.runtime import LocalRuntime +from openfl.experimental.workflow.placement import aggregator, collaborator +from openfl.experimental.workflow.utilities.ui import InspectFlow + + +def FedAvg(agg_model, models, weights=None): + state_dicts = [model.state_dict() for model in models] + state_dict = agg_model.state_dict() + for key in models[0].state_dict(): + state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], + axis=0, + weights=weights)) + + agg_model.load_state_dict(state_dict) + return agg_model + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 15 +class FederatedFlow_MNIST_Watermarking(FLSpec): + """ + This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning + Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + """ + + def __init__( + self, + model=None, + optimizer=None, + watermark_pretrain_optimizer=None, + watermark_retrain_optimizer=None, + round_number=0, + **kwargs, + ): + super().__init__(**kwargs) + + if model is not None: + self.model = model + 
self.optimizer = optimizer + self.watermark_pretrain_optimizer = watermark_pretrain_optimizer + self.watermark_retrain_optimizer = watermark_retrain_optimizer + else: + self.model = Net() + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + self.watermark_pretrain_optimizer = optim.SGD( + self.model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, + ) + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + self.round_number = round_number + self.watermark_pretraining_completed = False + + @aggregator + def start(self): + """ + This is the start of the Flow. + """ + + print(f": Start of flow ... ") + self.collaborators = self.runtime.collaborators + + # Randomly select a fraction of actual collaborator every round + fraction = 0.5 + if int(fraction * len(self.collaborators)) < 1: + raise Exception( + f"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training" + ) + self.subset_collaborators = random.sample( + self.collaborators, int(fraction * (len(self.collaborators))) + ) + + self.next(self.watermark_pretrain) + + @aggregator + def watermark_pretrain(self): + """ + Pre-Train the Model before starting Federated Learning. 
+ """ + if not self.watermark_pretraining_completed: + + print(": Performing Watermark Pre-training") + + for i in range(self.pretrain_epochs): + + watermark_pretrain_loss = train_model( + self.model, + self.watermark_pretrain_optimizer, + self.watermark_data_loader, + ":", + i, + log=False, + ) + watermark_pretrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print( + ": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}".format( + i, + watermark_pretrain_loss, + watermark_pretrain_validation_score, + ) + ) + + self.watermark_pretraining_completed = True + + self.next( + self.aggregated_model_validation, + foreach="subset_collaborators", + exclude=["watermark_pretrain_optimizer", "watermark_retrain_optimizer"], + ) + + @collaborator + def aggregated_model_validation(self): + """ + Perform Aggregated Model validation on Collaborators. + """ + self.agg_validation_score = inference(self.model, self.test_loader) + print( + f" Aggregated Model validation score = {self.agg_validation_score}" + ) + + self.next(self.train) + + @collaborator + def train(self): + """ + Train model on Local collab dataset. + + """ + print(": Performing Model Training on Local dataset ... ") + + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + + self.loss = train_model( + self.model, + self.optimizer, + self.train_loader, + ""), + self.round_number if self.round_number is not None else 0, + log=True, + ) + + self.next(self.local_model_validation) + + @collaborator + def local_model_validation(self): + """ + Validate locally trained model. + + """ + self.local_validation_score = inference(self.model, self.test_loader) + print( + f" Local model validation score = {self.local_validation_score}" + ) + self.next(self.join) + + @aggregator + def join(self, inputs): + """ + Model aggregation step. 
+ """ + + self.average_loss = sum(input.loss for input in inputs) / len(inputs) + self.aggregated_model_accuracy = sum( + input.agg_validation_score for input in inputs + ) / len(inputs) + self.local_model_accuracy = sum( + input.local_validation_score for input in inputs + ) / len(inputs) + + print(f": Joining models from collaborators...") + + print( + f" Aggregated model validation score = {self.aggregated_model_accuracy}" + ) + print(f" Average training loss = {self.average_loss}") + print(f" Average local model validation values = {self.local_model_accuracy}") + + self.model = FedAvg(self.model, [input.model for input in inputs]) + + self.next(self.watermark_retrain) + + @aggregator + def watermark_retrain(self): + """ + Retrain the aggregated model. + + """ + print(": Performing Watermark Retraining ... ") + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + + retrain_round = 0 + + # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + while ( + self.watermark_retrain_validation_score < self.watermark_acc_threshold + ) and (retrain_round < self.retrain_epochs): + self.watermark_retrain_train_loss = train_model( + self.model, + self.watermark_retrain_optimizer, + self.watermark_data_loader, + "", + retrain_round, + log=False, + ) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print( + ": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}".format( + self.round_number, + retrain_round, + self.watermark_retrain_train_loss, + self.watermark_retrain_validation_score, + ) + ) + + retrain_round += 1 + + self.next(self.end) + + @aggregator + def end(self): + """ + This is the last step in the Flow. 
+ + """ + print(f"This is the end of the flow") + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 17 +# Set random seed +random_seed = 42 +torch.manual_seed(random_seed) +np.random.seed(random_seed) +torch.backends.cudnn.enabled = False + +# Batch sizes +batch_size_train = 64 +batch_size_test = 64 +batch_size_watermark = 50 + +# MNIST parameters +learning_rate = 5e-2 +momentum = 5e-1 +log_interval = 20 + +# Watermarking parameters +watermark_pretrain_learning_rate = 1e-1 +watermark_pretrain_momentum = 5e-1 +watermark_pretrain_weight_decay = 5e-05 +watermark_retrain_learning_rate = 5e-3 + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 19 +def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size): + return { + "watermark_data_loader": torch.utils.data.DataLoader( + watermark_data, batch_size=batch_size, shuffle=True + ), + "pretrain_epochs": 25, + "retrain_epochs": 25, + "watermark_acc_threshold": 0.98, + } + +# Setup Aggregator private attributes via callable function +aggregator = Aggregator( + name="agg", + private_attributes_callable=callable_to_initialize_aggregator_private_attributes, + watermark_data=watermark_data, + batch_size=batch_size_watermark, + ) + +collaborator_names = [ + "Portland", + "Seattle", + "Chandler", + "Bangalore", + "New Delhi", +] + +def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset): + train = deepcopy(train_dataset) + test = deepcopy(test_dataset) + train.data = train_dataset.data[index::n_collaborators] + train.targets = train_dataset.targets[index::n_collaborators] + test.data = test_dataset.data[index::n_collaborators] + test.targets = test_dataset.targets[index::n_collaborators] + + return { + "train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True), + "test_loader": 
torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True), + } + +# Setup Collaborators private attributes via callable function +collaborators = [] +for idx, collaborator_name in enumerate(collaborator_names): + collaborators.append( + Collaborator( + name=collaborator_name, num_cpus=0, num_gpus=0, + private_attributes_callable=callable_to_initialize_collaborator_private_attributes, + index=idx, n_collaborators=len(collaborator_names), + train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64 + ) + ) + +local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="single_process") +print(f"Local runtime collaborators = {local_runtime.collaborators}") + +# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 21 +model = Net() +optimizer = optim.SGD( + model.parameters(), lr=learning_rate, momentum=momentum +) +watermark_pretrain_optimizer = optim.SGD( + model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, +) +watermark_retrain_optimizer = optim.SGD( + model.parameters(), lr=watermark_retrain_learning_rate +) +best_model = None +round_number = 0 +top_model_accuracy = 0 + +flflow = FederatedFlow_MNIST_Watermarking( + model, + optimizer, + watermark_pretrain_optimizer, + watermark_retrain_optimizer, + round_number, + checkpoint=True, +) +flflow.runtime = local_runtime +for i in range(1): + print(f"Starting round {i}...") +# flflow.run() + flflow.round_number += 1 + if hasattr(flflow, "aggregated_model_accuracy"): + aggregated_model_accuracy = flflow.aggregated_model_accuracy + if aggregated_model_accuracy > top_model_accuracy: + print( + f"\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\n" + ) + top_model_accuracy = aggregated_model_accuracy + best_model = flflow.model + + 
torch.save(best_model.state_dict(), "watermarked_mnist_model.pth") diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py new file mode 100644 index 0000000000..bb4473268c --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py @@ -0,0 +1,73 @@ +# Copyright (C) 2020-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import shutil +import filecmp +from pathlib import Path +from openfl.experimental.workflow.notebooktools import NotebookTools + +# Define paths +NOTEBOOK_PATH = "testcase_export/301_MNIST_Watermarking.ipynb" +ACTUAL_DIR = "testcase_export/test_artifacts/actual" +EXPECTED_DIR = "testcase_export/test_artifacts/expected" + +def setup_workspace(): + """Setup function to create the actual workspace for testing.""" + # Ensure the actual directory is empty + if Path(ACTUAL_DIR).exists(): + shutil.rmtree(ACTUAL_DIR) + Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) + + # Generate workspace using NotebookTools + NotebookTools.export( + notebook_path=NOTEBOOK_PATH, + output_workspace=ACTUAL_DIR + ) + +def compare_files(file1, file2): + """Compare the content of two files, ignoring commentted lines.""" + with open(file1, "r") as f1, open(file2, "r") as f2: + lines1 = f1.readlines() + lines2 = f2.readlines() + + # Remove comment lines (lines starting with '#') + lines1 = [line for line in lines1 if not line.startswith("#")] + lines2 = [line for line in lines2 if not line.startswith("#")] + + return lines1 == lines2 + +def compare_directories(dir1, dir2): + """Compare two directories recursively, including file content.""" + comparison = filecmp.dircmp(dir1, dir2) + # Check for differences in file names or structure + if comparison.left_only or comparison.right_only: + return False + + # Compare subdirectories + for subdir in comparison.common_dirs: + if not 
compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): + return False + + # Compare file content for all common files + for file in comparison.common_files: + file1 = Path(dir1) / file + file2 = Path(dir2) / file + print(f"Comparing files: {file1} and {file2}") + if not compare_files(file1, file2): + return False + + return True + +def test_export_functionality(): + """ + Test that the workspace generated by NotebookTools matches the Expected Artifacts. + + This function compares the contents of the actual directory generated by + NotebookTools with the expected directory. + """ + # Compare the expected and actual directories + assert compare_directories(EXPECTED_DIR, ACTUAL_DIR), ( + "The workspace generated by NotebookTools does not match the expected. " + "Check the differences in the test_artifacts/expected and test_artifacts/actual folders." + ) + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb new file mode 100644 index 0000000000..0ee4c67681 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb @@ -0,0 +1,587 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "dc13070c", + "metadata": {}, + "source": [ + "# Federated Runtime: 301_MNIST_Watermarking" + ] + }, + { + "cell_type": "markdown", + "id": "3b7357ef", + "metadata": {}, + "source": [ + "This tutorial is based on the LocalRuntime example [301_MNIST_Watermarking](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb). It has been adapted to demonstrate the FederatedRuntime version of the watermarking workflow. 
In this tutorial, we will guide you through the process of deploying the watermarking example within a federation, showcasing how to transition from a local setup to a federated environment effectively." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "a4394089", + "metadata": {}, + "source": [ + "# Getting Started" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "857f9995", + "metadata": {}, + "source": [ + "Initially, we start by specifying the module where cells marked with the `#| export` directive will be automatically exported. \n", + "\n", + "In the following cell, `#| default_exp experiment `indicates that the exported file will be named 'experiment'. This name can be modified based on user's requirement & preferences" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d79eacbd", + "metadata": {}, + "outputs": [], + "source": [ + "#| default_exp experiment" + ] + }, + { + "cell_type": "markdown", + "id": "62449b5f", + "metadata": {}, + "source": [ + "Once we have specified the name of the module, subsequent cells of the notebook need to be *appended* by the `#| export` directive as shown below. 
User should ensure that *all* the notebook functionality required in the Federated Learning experiment is included in this directive" + ] + }, + { + "cell_type": "markdown", + "id": "2e19dcf2", + "metadata": {}, + "source": [ + "We start by installing OpenFL and dependencies of the workflow interface \n", + "> These dependencies are required to be exported and become the requirements for the Federated Learning Workspace " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f7475cba", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "!pip install git+https://github.com/securefederatedai/openfl.git\n", + "!pip install -r ../../../workflow_interface_requirements.txt\n", + "!pip install matplotlib\n", + "!pip install torch==2.3.1\n", + "!pip install torchvision==0.18.1\n", + "!pip install git+https://github.com/pyviz-topics/imagen.git@master\n", + "!pip install holoviews==1.15.4\n", + "!pip install -U ipywidgets" + ] + }, + { + "cell_type": "markdown", + "id": "9a6ae8e2", + "metadata": {}, + "source": [ + "We now define our model, optimizer, and some helper functions like we would for any other deep learning experiment \n", + "\n", + "> This cell and all the subsequent cells are important ingredients of the Federated Learning experiment and therefore annotated with the `#| export` directive" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9bd8ac2d", + "metadata": {}, + "outputs": [], + "source": [ + "# | export\n", + "\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "import torch\n", + "import numpy as np\n", + "\n", + "random_seed = 1\n", + "torch.backends.cudnn.enabled = False\n", + "torch.manual_seed(random_seed)\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self, dropout=0.0):\n", + " super(Net, self).__init__()\n", + " self.dropout = dropout\n", + " self.block = nn.Sequential(\n", + " nn.Conv2d(1, 32, 2),\n", + " 
nn.MaxPool2d(2),\n", + " nn.ReLU(),\n", + " nn.Conv2d(32, 64, 2),\n", + " nn.MaxPool2d(2),\n", + " nn.ReLU(),\n", + " nn.Conv2d(64, 128, 2),\n", + " nn.ReLU(),\n", + " )\n", + " self.fc1 = nn.Linear(128 * 5**2, 200)\n", + " self.fc2 = nn.Linear(200, 10)\n", + " self.relu = nn.ReLU()\n", + " self.dropout = nn.Dropout(p=dropout)\n", + "\n", + " def forward(self, x):\n", + " x = self.dropout(x)\n", + " out = self.block(x)\n", + " out = out.view(-1, 128 * 5**2)\n", + " out = self.dropout(out)\n", + " out = self.relu(self.fc1(out))\n", + " out = self.dropout(out)\n", + " out = self.fc2(out)\n", + " return F.log_softmax(out, 1)\n", + "\n", + "\n", + "def inference(network, test_loader):\n", + " network.eval()\n", + " correct = 0\n", + " with torch.no_grad():\n", + " for data, target in test_loader:\n", + " output = network(data)\n", + " pred = output.data.max(1, keepdim=True)[1]\n", + " correct += pred.eq(target.data.view_as(pred)).sum()\n", + " accuracy = float(correct / len(test_loader.dataset))\n", + " return accuracy\n", + "\n", + "\n", + "def train_model(model, optimizer, data_loader, entity, round_number, log=False):\n", + " # Helper function to train the model\n", + " train_loss = 0\n", + " log_interval = 20\n", + " model.train()\n", + " for batch_idx, (X, y) in enumerate(data_loader):\n", + " optimizer.zero_grad()\n", + "\n", + " output = model(X)\n", + " loss = F.nll_loss(output, y)\n", + " loss.backward()\n", + "\n", + " optimizer.step()\n", + "\n", + " train_loss += loss.item() * len(X)\n", + " if batch_idx % log_interval == 0 and log:\n", + " print(\"{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}\".format(\n", + " entity,\n", + " round_number,\n", + " batch_idx * len(X),\n", + " len(data_loader.dataset),\n", + " 100.0 * batch_idx / len(data_loader),\n", + " loss.item(),\n", + " )\n", + " )\n", + " train_loss /= len(data_loader.dataset)\n", + " return train_loss" + ] + }, + { + "cell_type": "markdown", + "id": "d0849d57", + "metadata": {}, + 
"source": [ + "Next we import the `FLSpec` & placement decorators (`aggregator/collaborator`)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "89cf4866", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "from openfl.experimental.workflow.interface import FLSpec\n", + "from openfl.experimental.workflow.placement import aggregator, collaborator\n", + "\n", + "def FedAvg(agg_model, models, weights=None):\n", + " state_dicts = [model.state_dict() for model in models]\n", + " state_dict = agg_model.state_dict()\n", + " for key in models[0].state_dict():\n", + " state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts],\n", + " axis=0, \n", + " weights=weights))\n", + " \n", + " agg_model.load_state_dict(state_dict)\n", + " return agg_model" + ] + }, + { + "cell_type": "markdown", + "id": "36ed5e31", + "metadata": {}, + "source": [ + "Let us now define the Workflow for Watermark embedding." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52c4a752", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "class FederatedFlow_MNIST_Watermarking(FLSpec):\n", + " \"\"\"\n", + " This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning\n", + " Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298)\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " model=None,\n", + " optimizer=None,\n", + " watermark_pretrain_optimizer=None,\n", + " watermark_retrain_optimizer=None,\n", + " round_number=0,\n", + " n_rounds=3,\n", + " **kwargs,\n", + " ):\n", + " super().__init__(**kwargs)\n", + "\n", + " if model is not None:\n", + " self.model = model\n", + " self.optimizer = optimizer\n", + " self.watermark_pretrain_optimizer = watermark_pretrain_optimizer\n", + " self.watermark_retrain_optimizer = watermark_retrain_optimizer\n", + " else:\n", + " self.model = Net()\n", + " self.optimizer = 
optim.SGD(\n", + " self.model.parameters(), lr=learning_rate, momentum=momentum\n", + " )\n", + " self.watermark_pretrain_optimizer = optim.SGD(\n", + " self.model.parameters(),\n", + " lr=watermark_pretrain_learning_rate,\n", + " momentum=watermark_pretrain_momentum,\n", + " weight_decay=watermark_pretrain_weight_decay,\n", + " )\n", + " self.watermark_retrain_optimizer = optim.SGD(\n", + " self.model.parameters(), lr=watermark_retrain_learning_rate\n", + " )\n", + " self.round_number = round_number\n", + " self.n_rounds = n_rounds\n", + " self.watermark_pretraining_completed = False\n", + "\n", + " @aggregator\n", + " def start(self):\n", + " \"\"\"\n", + " This is the start of the Flow.\n", + " \"\"\"\n", + " print(\": Start of flow ... \")\n", + " self.collaborators = self.runtime.collaborators\n", + "\n", + " self.next(self.watermark_pretrain)\n", + "\n", + " @aggregator\n", + " def watermark_pretrain(self):\n", + " \"\"\"\n", + " Pre-Train the Model before starting Federated Learning.\n", + " \"\"\"\n", + " if not self.watermark_pretraining_completed:\n", + "\n", + " print(\": Performing Watermark Pre-training\")\n", + "\n", + " for i in range(self.pretrain_epochs):\n", + "\n", + " watermark_pretrain_loss = train_model(\n", + " self.model,\n", + " self.watermark_pretrain_optimizer,\n", + " self.watermark_data_loader,\n", + " \":\",\n", + " i,\n", + " log=False,\n", + " )\n", + " watermark_pretrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + "\n", + " print(f\": Watermark Pretraining: Round: {i:<3}\"\n", + " + f\" Loss: {watermark_pretrain_loss:<.6f}\"\n", + " + f\" Acc: {watermark_pretrain_validation_score:<.6f}\")\n", + "\n", + " self.watermark_pretraining_completed = True\n", + "\n", + " self.next(\n", + " self.aggregated_model_validation,\n", + " foreach=\"collaborators\",\n", + " )\n", + "\n", + " @collaborator\n", + " def aggregated_model_validation(self):\n", + " \"\"\"\n", + " Perform Aggregated Model 
validation on Collaborators.\n", + " \"\"\"\n", + " self.agg_validation_score = inference(self.model, self.test_loader)\n", + " print(f\"\"\n", + " + f\" Aggregated Model validation score = {self.agg_validation_score}\"\n", + " )\n", + "\n", + " self.next(self.train)\n", + "\n", + " @collaborator\n", + " def train(self):\n", + " \"\"\"\n", + " Train model on Local collab dataset.\n", + " \"\"\"\n", + " print(\": Performing Model Training on Local dataset ... \")\n", + "\n", + " self.optimizer = optim.SGD(\n", + " self.model.parameters(), lr=learning_rate, momentum=momentum\n", + " )\n", + "\n", + " self.loss = train_model(\n", + " self.model,\n", + " self.optimizer,\n", + " self.train_loader,\n", + " f\"\",\n", + " self.round_number,\n", + " log=True,\n", + " )\n", + "\n", + " self.next(self.local_model_validation)\n", + "\n", + " @collaborator\n", + " def local_model_validation(self):\n", + " \"\"\"\n", + " Validate locally trained model.\n", + " \"\"\"\n", + " self.local_validation_score = inference(self.model, self.test_loader)\n", + " print(\n", + " f\" Local model validation score = {self.local_validation_score}\"\n", + " )\n", + " self.next(self.join)\n", + "\n", + " @aggregator\n", + " def join(self, inputs):\n", + " \"\"\"\n", + " Model aggregation step.\n", + " \"\"\"\n", + " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", + " self.aggregated_model_accuracy = sum(\n", + " input.agg_validation_score for input in inputs\n", + " ) / len(inputs)\n", + " self.local_model_accuracy = sum(\n", + " input.local_validation_score for input in inputs\n", + " ) / len(inputs)\n", + "\n", + " print(\": Joining models from collaborators...\")\n", + "\n", + " print(\n", + " f\" Aggregated model validation score = {self.aggregated_model_accuracy}\"\n", + " )\n", + " print(f\" Average training loss = {self.average_loss}\")\n", + " print(f\" Average local model validation values = {self.local_model_accuracy}\")\n", + "\n", + " self.model = 
FedAvg(self.model, [input.model for input in inputs])\n", + "\n", + " self.next(self.watermark_retrain)\n", + "\n", + " @aggregator\n", + " def watermark_retrain(self):\n", + " \"\"\"\n", + " Retrain the aggregated model.\n", + " \"\"\"\n", + " print(\": Performing Watermark Retraining ... \")\n", + " self.watermark_retrain_optimizer = optim.SGD(\n", + " self.model.parameters(), lr=watermark_retrain_learning_rate\n", + " )\n", + "\n", + " retrain_round = 0\n", + "\n", + " # Perform re-training until (accuracy >= acc_threshold) or\n", + " # (retrain_round > number of retrain_epochs)\n", + " self.watermark_retrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + " while (\n", + " self.watermark_retrain_validation_score < self.watermark_acc_threshold\n", + " ) and (retrain_round < self.retrain_epochs):\n", + " self.watermark_retrain_train_loss = train_model(\n", + " self.model,\n", + " self.watermark_retrain_optimizer,\n", + " self.watermark_data_loader,\n", + " \"\",\n", + " retrain_round,\n", + " log=False,\n", + " )\n", + " self.watermark_retrain_validation_score = inference(\n", + " self.model, self.watermark_data_loader\n", + " )\n", + "\n", + " print(f\": Watermark Retraining: Train Epoch: {self.round_number:<3}\"\n", + " + f\" Retrain Round: {retrain_round:<3}\"\n", + " + f\" Loss: {self.watermark_retrain_train_loss:<.6f},\"\n", + " + f\" Acc: {self.watermark_retrain_validation_score:<.6f}\")\n", + " retrain_round += 1\n", + "\n", + " self.next(self.internal_loop)\n", + " \n", + " @aggregator\n", + " def internal_loop(self):\n", + " \"\"\"\n", + " Internal loop to continue the Federated Learning process.\n", + " \"\"\"\n", + " if self.round_number == self.n_rounds - 1:\n", + " print(f\"\\nCompleted training for all {self.n_rounds} round(s)\")\n", + " self.next(self.end)\n", + " else:\n", + " self.round_number += 1\n", + " print(f\"\\nCompleted round: {self.round_number}\")\n", + " 
self.next(self.aggregated_model_validation, foreach='collaborators')\n", + "\n", + " @aggregator\n", + " def end(self):\n", + " \"\"\"\n", + " This is the last step in the Flow.\n", + " \"\"\"\n", + " print(\"This is the end of the flow\")" + ] + }, + { + "cell_type": "markdown", + "id": "b5371b6d", + "metadata": {}, + "source": [ + "## Defining and Initializing the Federated Runtime\n", + "We initialize the Federated Runtime by providing:\n", + "- `director_info`: The director's connection information \n", + "- `authorized_collaborators`: A list of authorized collaborators\n", + "- `notebook_path`: Path to this Jupyter notebook." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "1715a373", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "from openfl.experimental.workflow.runtime import FederatedRuntime\n", + "\n", + "director_info = {\n", + " 'director_node_fqdn':'localhost',\n", + " 'director_port':50050,\n", + "}\n", + "\n", + "authorized_collaborators = ['Bangalore', 'Chandler']\n", + "\n", + "federated_runtime = FederatedRuntime(\n", + " collaborators=authorized_collaborators,\n", + " director=director_info, \n", + " notebook_path='./MNIST_Watermarking.ipynb',\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6de9684f", + "metadata": {}, + "source": [ + "The status of the connected Envoys can be checked using the `get_envoys()` method of the `federated_runtime`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f1be87f", + "metadata": {}, + "outputs": [], + "source": [ + "federated_runtime.get_envoys()" + ] + }, + { + "cell_type": "markdown", + "id": "0eaeca25", + "metadata": {}, + "source": [ + "With the federated_runtime now instantiated, we will proceed to deploy the watermarking workspace and run the experiment!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6d19819", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "# Set random seed\n", + "random_seed = 42\n", + "torch.manual_seed(random_seed)\n", + "np.random.seed(random_seed)\n", + "torch.backends.cudnn.enabled = False\n", + "\n", + "# MNIST parameters\n", + "learning_rate = 5e-2\n", + "momentum = 5e-1\n", + "log_interval = 20\n", + "\n", + "# Watermarking parameters\n", + "watermark_pretrain_learning_rate = 1e-1\n", + "watermark_pretrain_momentum = 5e-1\n", + "watermark_pretrain_weight_decay = 5e-05\n", + "watermark_retrain_learning_rate = 5e-3\n", + "\n", + "model = Net()\n", + "optimizer = optim.SGD(\n", + " model.parameters(), lr=learning_rate, momentum=momentum\n", + ")\n", + "watermark_pretrain_optimizer = optim.SGD(\n", + " model.parameters(),\n", + " lr=watermark_pretrain_learning_rate,\n", + " momentum=watermark_pretrain_momentum,\n", + " weight_decay=watermark_pretrain_weight_decay,\n", + ")\n", + "watermark_retrain_optimizer = optim.SGD(\n", + " model.parameters(), lr=watermark_retrain_learning_rate\n", + ")\n", + "\n", + "flflow = FederatedFlow_MNIST_Watermarking(\n", + " model,\n", + " optimizer,\n", + " watermark_pretrain_optimizer,\n", + " watermark_retrain_optimizer,\n", + " checkpoint=True,\n", + ")\n", + "flflow.runtime = federated_runtime\n", + "flflow.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace 
b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace new file mode 100644 index 0000000000..3c2c5d08b4 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace @@ -0,0 +1,2 @@ +current_plan_name: default + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults new file mode 100644 index 0000000000..fb82f9c5b6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults @@ -0,0 +1,2 @@ +../../workspace/plan/defaults + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml new file mode 100644 index 0000000000..f29bada0f1 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml @@ -0,0 +1,25 @@ +aggregator: + defaults: plan/defaults/aggregator.yaml + settings: + rounds_to_train: 1 + template: openfl.experimental.workflow.component.Aggregator +collaborator: + defaults: plan/defaults/collaborator.yaml + settings: {} + template: openfl.experimental.workflow.component.Collaborator +federated_flow: + settings: + checkpoint: true + model: src.experiment.model + optimizer: src.experiment.optimizer + watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer + watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer + template: src.experiment.FederatedFlow_MNIST_Watermarking +network: + settings: + agg_addr: localhost + agg_port: 53798 + client_reconnect_interval: 5 + disable_client_auth: false + tls: false + template: 
openfl.federation.Network diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt new file mode 100644 index 0000000000..2a7f08eab8 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt @@ -0,0 +1,7 @@ +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability +matplotlib +torch==2.3.1 +torchvision==0.18.1 +git+https://github.com/pyviz-topics/imagen.git@master +holoviews==1.15.4 +ipywidgets diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py new file mode 100644 index 0000000000..49883934a8 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py new file mode 100644 index 0000000000..3ac90ade4d --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py @@ -0,0 +1,380 @@ +# AUTOGENERATED! DO NOT EDIT! File to edit: ../../../MNIST_Watermarking.ipynb. 
+ +# %% auto 0 +__all__ = ['random_seed', 'director_info', 'authorized_collaborators', 'federated_runtime', 'learning_rate', 'momentum', + 'log_interval', 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', + 'watermark_pretrain_weight_decay', 'watermark_retrain_learning_rate', 'model', 'optimizer', + 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'flflow', 'Net', 'inference', 'train_model', + 'FedAvg', 'FederatedFlow_MNIST_Watermarking'] + +# %% ../../../MNIST_Watermarking.ipynb 7 + +# %% ../../../MNIST_Watermarking.ipynb 9 +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch +import numpy as np + +random_seed = 1 +torch.backends.cudnn.enabled = False +torch.manual_seed(random_seed) + +class Net(nn.Module): + def __init__(self, dropout=0.0): + super(Net, self).__init__() + self.dropout = dropout + self.block = nn.Sequential( + nn.Conv2d(1, 32, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(32, 64, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(64, 128, 2), + nn.ReLU(), + ) + self.fc1 = nn.Linear(128 * 5**2, 200) + self.fc2 = nn.Linear(200, 10) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x): + x = self.dropout(x) + out = self.block(x) + out = out.view(-1, 128 * 5**2) + out = self.dropout(out) + out = self.relu(self.fc1(out)) + out = self.dropout(out) + out = self.fc2(out) + return F.log_softmax(out, 1) + + +def inference(network, test_loader): + network.eval() + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + output = network(data) + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).sum() + accuracy = float(correct / len(test_loader.dataset)) + return accuracy + + +def train_model(model, optimizer, data_loader, entity, round_number, log=False): + # Helper function to train the model + train_loss = 0 + log_interval = 20 + model.train() + for batch_idx, (X, y) in 
enumerate(data_loader): + optimizer.zero_grad() + + output = model(X) + loss = F.nll_loss(output, y) + loss.backward() + + optimizer.step() + + train_loss += loss.item() * len(X) + if batch_idx % log_interval == 0 and log: + print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( + entity, + round_number, + batch_idx * len(X), + len(data_loader.dataset), + 100.0 * batch_idx / len(data_loader), + loss.item(), + ) + ) + train_loss /= len(data_loader.dataset) + return train_loss + +# %% ../../../MNIST_Watermarking.ipynb 11 +from openfl.experimental.workflow.interface import FLSpec +from openfl.experimental.workflow.placement import aggregator, collaborator + +def FedAvg(agg_model, models, weights=None): + state_dicts = [model.state_dict() for model in models] + state_dict = agg_model.state_dict() + for key in models[0].state_dict(): + state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], + axis=0, + weights=weights)) + + agg_model.load_state_dict(state_dict) + return agg_model + +# %% ../../../MNIST_Watermarking.ipynb 13 +class FederatedFlow_MNIST_Watermarking(FLSpec): + """ + This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning + Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + """ + + def __init__( + self, + model=None, + optimizer=None, + watermark_pretrain_optimizer=None, + watermark_retrain_optimizer=None, + round_number=0, + n_rounds=3, + **kwargs, + ): + super().__init__(**kwargs) + + if model is not None: + self.model = model + self.optimizer = optimizer + self.watermark_pretrain_optimizer = watermark_pretrain_optimizer + self.watermark_retrain_optimizer = watermark_retrain_optimizer + else: + self.model = Net() + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + self.watermark_pretrain_optimizer = optim.SGD( + self.model.parameters(), + lr=watermark_pretrain_learning_rate, + 
momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, + ) + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + self.round_number = round_number + self.n_rounds = n_rounds + self.watermark_pretraining_completed = False + + @aggregator + def start(self): + """ + This is the start of the Flow. + """ + print(": Start of flow ... ") + self.collaborators = self.runtime.collaborators + + self.next(self.watermark_pretrain) + + @aggregator + def watermark_pretrain(self): + """ + Pre-Train the Model before starting Federated Learning. + """ + if not self.watermark_pretraining_completed: + + print(": Performing Watermark Pre-training") + + for i in range(self.pretrain_epochs): + + watermark_pretrain_loss = train_model( + self.model, + self.watermark_pretrain_optimizer, + self.watermark_data_loader, + ":", + i, + log=False, + ) + watermark_pretrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print(f": Watermark Pretraining: Round: {i:<3}" + + f" Loss: {watermark_pretrain_loss:<.6f}" + + f" Acc: {watermark_pretrain_validation_score:<.6f}") + + self.watermark_pretraining_completed = True + + self.next( + self.aggregated_model_validation, + foreach="collaborators", + ) + + @collaborator + def aggregated_model_validation(self): + """ + Perform Aggregated Model validation on Collaborators. + """ + self.agg_validation_score = inference(self.model, self.test_loader) + print(f"" + + f" Aggregated Model validation score = {self.agg_validation_score}" + ) + + self.next(self.train) + + @collaborator + def train(self): + """ + Train model on Local collab dataset. + """ + print(": Performing Model Training on Local dataset ... 
") + + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + + self.loss = train_model( + self.model, + self.optimizer, + self.train_loader, + f"", + self.round_number, + log=True, + ) + + self.next(self.local_model_validation) + + @collaborator + def local_model_validation(self): + """ + Validate locally trained model. + """ + self.local_validation_score = inference(self.model, self.test_loader) + print( + f" Local model validation score = {self.local_validation_score}" + ) + self.next(self.join) + + @aggregator + def join(self, inputs): + """ + Model aggregation step. + """ + self.average_loss = sum(input.loss for input in inputs) / len(inputs) + self.aggregated_model_accuracy = sum( + input.agg_validation_score for input in inputs + ) / len(inputs) + self.local_model_accuracy = sum( + input.local_validation_score for input in inputs + ) / len(inputs) + + print(": Joining models from collaborators...") + + print( + f" Aggregated model validation score = {self.aggregated_model_accuracy}" + ) + print(f" Average training loss = {self.average_loss}") + print(f" Average local model validation values = {self.local_model_accuracy}") + + self.model = FedAvg(self.model, [input.model for input in inputs]) + + self.next(self.watermark_retrain) + + @aggregator + def watermark_retrain(self): + """ + Retrain the aggregated model. + """ + print(": Performing Watermark Retraining ... 
") + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + + retrain_round = 0 + + # Perform re-training until (accuracy >= acc_threshold) or + # (retrain_round > number of retrain_epochs) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + while ( + self.watermark_retrain_validation_score < self.watermark_acc_threshold + ) and (retrain_round < self.retrain_epochs): + self.watermark_retrain_train_loss = train_model( + self.model, + self.watermark_retrain_optimizer, + self.watermark_data_loader, + "", + retrain_round, + log=False, + ) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print(f": Watermark Retraining: Train Epoch: {self.round_number:<3}" + + f" Retrain Round: {retrain_round:<3}" + + f" Loss: {self.watermark_retrain_train_loss:<.6f}," + + f" Acc: {self.watermark_retrain_validation_score:<.6f}") + retrain_round += 1 + + self.next(self.internal_loop) + + @aggregator + def internal_loop(self): + """ + Internal loop to continue the Federated Learning process. + """ + if self.round_number == self.n_rounds - 1: + print(f"\nCompleted training for all {self.n_rounds} round(s)") + self.next(self.end) + else: + self.round_number += 1 + print(f"\nCompleted round: {self.round_number}") + self.next(self.aggregated_model_validation, foreach='collaborators') + + @aggregator + def end(self): + """ + This is the last step in the Flow. 
+ """ + print("This is the end of the flow") + +# %% ../../../MNIST_Watermarking.ipynb 15 +from openfl.experimental.workflow.runtime import FederatedRuntime + +director_info = { + 'director_node_fqdn':'localhost', + 'director_port':50050, +} + +authorized_collaborators = ['Bangalore', 'Chandler'] + +federated_runtime = FederatedRuntime( + collaborators=authorized_collaborators, + director=director_info, + notebook_path='./MNIST_Watermarking.ipynb', +) + +# %% ../../../MNIST_Watermarking.ipynb 19 +# Set random seed +random_seed = 42 +torch.manual_seed(random_seed) +np.random.seed(random_seed) +torch.backends.cudnn.enabled = False + +# MNIST parameters +learning_rate = 5e-2 +momentum = 5e-1 +log_interval = 20 + +# Watermarking parameters +watermark_pretrain_learning_rate = 1e-1 +watermark_pretrain_momentum = 5e-1 +watermark_pretrain_weight_decay = 5e-05 +watermark_retrain_learning_rate = 5e-3 + +model = Net() +optimizer = optim.SGD( + model.parameters(), lr=learning_rate, momentum=momentum +) +watermark_pretrain_optimizer = optim.SGD( + model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, +) +watermark_retrain_optimizer = optim.SGD( + model.parameters(), lr=watermark_retrain_learning_rate +) + +flflow = FederatedFlow_MNIST_Watermarking( + model, + optimizer, + watermark_pretrain_optimizer, + watermark_retrain_optimizer, + checkpoint=True, +) +flflow.runtime = federated_runtime +# flflow.run() diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace new file mode 100644 index 0000000000..3c2c5d08b4 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace @@ -0,0 +1,2 @@ +current_plan_name: default + diff --git 
a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults new file mode 100644 index 0000000000..fb82f9c5b6 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults @@ -0,0 +1,2 @@ +../../workspace/plan/defaults + diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml new file mode 100644 index 0000000000..f29bada0f1 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml @@ -0,0 +1,25 @@ +aggregator: + defaults: plan/defaults/aggregator.yaml + settings: + rounds_to_train: 1 + template: openfl.experimental.workflow.component.Aggregator +collaborator: + defaults: plan/defaults/collaborator.yaml + settings: {} + template: openfl.experimental.workflow.component.Collaborator +federated_flow: + settings: + checkpoint: true + model: src.experiment.model + optimizer: src.experiment.optimizer + watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer + watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer + template: src.experiment.FederatedFlow_MNIST_Watermarking +network: + settings: + agg_addr: localhost + agg_port: 53798 + client_reconnect_interval: 5 + disable_client_auth: false + tls: false + template: openfl.federation.Network diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt new file mode 100644 index 0000000000..2a7f08eab8 --- 
/dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt @@ -0,0 +1,7 @@ +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability +matplotlib +torch==2.3.1 +torchvision==0.18.1 +git+https://github.com/pyviz-topics/imagen.git@master +holoviews==1.15.4 +ipywidgets diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py new file mode 100644 index 0000000000..49883934a8 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py new file mode 100644 index 0000000000..bfca717881 --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py @@ -0,0 +1,380 @@ +# AUTOGENERATED! DO NOT EDIT! File to edit: ../../MNIST_Watermarking.ipynb. 
+ +# %% auto 0 +__all__ = ['random_seed', 'director_info', 'authorized_collaborators', 'federated_runtime', 'learning_rate', 'momentum', + 'log_interval', 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', + 'watermark_pretrain_weight_decay', 'watermark_retrain_learning_rate', 'model', 'optimizer', + 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'flflow', 'Net', 'inference', 'train_model', + 'FedAvg', 'FederatedFlow_MNIST_Watermarking'] + +# %% ../../MNIST_Watermarking.ipynb 7 + +# %% ../../MNIST_Watermarking.ipynb 9 +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch +import numpy as np + +random_seed = 1 +torch.backends.cudnn.enabled = False +torch.manual_seed(random_seed) + +class Net(nn.Module): + def __init__(self, dropout=0.0): + super(Net, self).__init__() + self.dropout = dropout + self.block = nn.Sequential( + nn.Conv2d(1, 32, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(32, 64, 2), + nn.MaxPool2d(2), + nn.ReLU(), + nn.Conv2d(64, 128, 2), + nn.ReLU(), + ) + self.fc1 = nn.Linear(128 * 5**2, 200) + self.fc2 = nn.Linear(200, 10) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x): + x = self.dropout(x) + out = self.block(x) + out = out.view(-1, 128 * 5**2) + out = self.dropout(out) + out = self.relu(self.fc1(out)) + out = self.dropout(out) + out = self.fc2(out) + return F.log_softmax(out, 1) + + +def inference(network, test_loader): + network.eval() + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + output = network(data) + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).sum() + accuracy = float(correct / len(test_loader.dataset)) + return accuracy + + +def train_model(model, optimizer, data_loader, entity, round_number, log=False): + # Helper function to train the model + train_loss = 0 + log_interval = 20 + model.train() + for batch_idx, (X, y) in enumerate(data_loader): + 
optimizer.zero_grad() + + output = model(X) + loss = F.nll_loss(output, y) + loss.backward() + + optimizer.step() + + train_loss += loss.item() * len(X) + if batch_idx % log_interval == 0 and log: + print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( + entity, + round_number, + batch_idx * len(X), + len(data_loader.dataset), + 100.0 * batch_idx / len(data_loader), + loss.item(), + ) + ) + train_loss /= len(data_loader.dataset) + return train_loss + +# %% ../../MNIST_Watermarking.ipynb 11 +from openfl.experimental.workflow.interface import FLSpec +from openfl.experimental.workflow.placement import aggregator, collaborator + +def FedAvg(agg_model, models, weights=None): + state_dicts = [model.state_dict() for model in models] + state_dict = agg_model.state_dict() + for key in models[0].state_dict(): + state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], + axis=0, + weights=weights)) + + agg_model.load_state_dict(state_dict) + return agg_model + +# %% ../../MNIST_Watermarking.ipynb 13 +class FederatedFlow_MNIST_Watermarking(FLSpec): + """ + This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning + Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) + """ + + def __init__( + self, + model=None, + optimizer=None, + watermark_pretrain_optimizer=None, + watermark_retrain_optimizer=None, + round_number=0, + n_rounds=3, + **kwargs, + ): + super().__init__(**kwargs) + + if model is not None: + self.model = model + self.optimizer = optimizer + self.watermark_pretrain_optimizer = watermark_pretrain_optimizer + self.watermark_retrain_optimizer = watermark_retrain_optimizer + else: + self.model = Net() + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + self.watermark_pretrain_optimizer = optim.SGD( + self.model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + 
weight_decay=watermark_pretrain_weight_decay, + ) + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + self.round_number = round_number + self.n_rounds = n_rounds + self.watermark_pretraining_completed = False + + @aggregator + def start(self): + """ + This is the start of the Flow. + """ + print(": Start of flow ... ") + self.collaborators = self.runtime.collaborators + + self.next(self.watermark_pretrain) + + @aggregator + def watermark_pretrain(self): + """ + Pre-Train the Model before starting Federated Learning. + """ + if not self.watermark_pretraining_completed: + + print(": Performing Watermark Pre-training") + + for i in range(self.pretrain_epochs): + + watermark_pretrain_loss = train_model( + self.model, + self.watermark_pretrain_optimizer, + self.watermark_data_loader, + ":", + i, + log=False, + ) + watermark_pretrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print(f": Watermark Pretraining: Round: {i:<3}" + + f" Loss: {watermark_pretrain_loss:<.6f}" + + f" Acc: {watermark_pretrain_validation_score:<.6f}") + + self.watermark_pretraining_completed = True + + self.next( + self.aggregated_model_validation, + foreach="collaborators", + ) + + @collaborator + def aggregated_model_validation(self): + """ + Perform Aggregated Model validation on Collaborators. + """ + self.agg_validation_score = inference(self.model, self.test_loader) + print(f"" + + f" Aggregated Model validation score = {self.agg_validation_score}" + ) + + self.next(self.train) + + @collaborator + def train(self): + """ + Train model on Local collab dataset. + """ + print(": Performing Model Training on Local dataset ... 
") + + self.optimizer = optim.SGD( + self.model.parameters(), lr=learning_rate, momentum=momentum + ) + + self.loss = train_model( + self.model, + self.optimizer, + self.train_loader, + f"", + self.round_number, + log=True, + ) + + self.next(self.local_model_validation) + + @collaborator + def local_model_validation(self): + """ + Validate locally trained model. + """ + self.local_validation_score = inference(self.model, self.test_loader) + print( + f" Local model validation score = {self.local_validation_score}" + ) + self.next(self.join) + + @aggregator + def join(self, inputs): + """ + Model aggregation step. + """ + self.average_loss = sum(input.loss for input in inputs) / len(inputs) + self.aggregated_model_accuracy = sum( + input.agg_validation_score for input in inputs + ) / len(inputs) + self.local_model_accuracy = sum( + input.local_validation_score for input in inputs + ) / len(inputs) + + print(": Joining models from collaborators...") + + print( + f" Aggregated model validation score = {self.aggregated_model_accuracy}" + ) + print(f" Average training loss = {self.average_loss}") + print(f" Average local model validation values = {self.local_model_accuracy}") + + self.model = FedAvg(self.model, [input.model for input in inputs]) + + self.next(self.watermark_retrain) + + @aggregator + def watermark_retrain(self): + """ + Retrain the aggregated model. + """ + print(": Performing Watermark Retraining ... 
") + self.watermark_retrain_optimizer = optim.SGD( + self.model.parameters(), lr=watermark_retrain_learning_rate + ) + + retrain_round = 0 + + # Perform re-training until (accuracy >= acc_threshold) or + # (retrain_round > number of retrain_epochs) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + while ( + self.watermark_retrain_validation_score < self.watermark_acc_threshold + ) and (retrain_round < self.retrain_epochs): + self.watermark_retrain_train_loss = train_model( + self.model, + self.watermark_retrain_optimizer, + self.watermark_data_loader, + "", + retrain_round, + log=False, + ) + self.watermark_retrain_validation_score = inference( + self.model, self.watermark_data_loader + ) + + print(f": Watermark Retraining: Train Epoch: {self.round_number:<3}" + + f" Retrain Round: {retrain_round:<3}" + + f" Loss: {self.watermark_retrain_train_loss:<.6f}," + + f" Acc: {self.watermark_retrain_validation_score:<.6f}") + retrain_round += 1 + + self.next(self.internal_loop) + + @aggregator + def internal_loop(self): + """ + Internal loop to continue the Federated Learning process. + """ + if self.round_number == self.n_rounds - 1: + print(f"\nCompleted training for all {self.n_rounds} round(s)") + self.next(self.end) + else: + self.round_number += 1 + print(f"\nCompleted round: {self.round_number}") + self.next(self.aggregated_model_validation, foreach='collaborators') + + @aggregator + def end(self): + """ + This is the last step in the Flow. 
+ """ + print("This is the end of the flow") + +# %% ../../MNIST_Watermarking.ipynb 15 +from openfl.experimental.workflow.runtime import FederatedRuntime + +director_info = { + 'director_node_fqdn':'localhost', + 'director_port':50050, +} + +authorized_collaborators = ['Bangalore', 'Chandler'] + +federated_runtime = FederatedRuntime( + collaborators=authorized_collaborators, + director=director_info, + notebook_path='./MNIST_Watermarking.ipynb', +) + +# %% ../../MNIST_Watermarking.ipynb 19 +# Set random seed +random_seed = 42 +torch.manual_seed(random_seed) +np.random.seed(random_seed) +torch.backends.cudnn.enabled = False + +# MNIST parameters +learning_rate = 5e-2 +momentum = 5e-1 +log_interval = 20 + +# Watermarking parameters +watermark_pretrain_learning_rate = 1e-1 +watermark_pretrain_momentum = 5e-1 +watermark_pretrain_weight_decay = 5e-05 +watermark_retrain_learning_rate = 5e-3 + +model = Net() +optimizer = optim.SGD( + model.parameters(), lr=learning_rate, momentum=momentum +) +watermark_pretrain_optimizer = optim.SGD( + model.parameters(), + lr=watermark_pretrain_learning_rate, + momentum=watermark_pretrain_momentum, + weight_decay=watermark_pretrain_weight_decay, +) +watermark_retrain_optimizer = optim.SGD( + model.parameters(), lr=watermark_retrain_learning_rate +) + +flflow = FederatedFlow_MNIST_Watermarking( + model, + optimizer, + watermark_pretrain_optimizer, + watermark_retrain_optimizer, + checkpoint=True, +) +flflow.runtime = federated_runtime +# flflow.run() diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py new file mode 100644 index 0000000000..901e403dfa --- /dev/null +++ b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py @@ -0,0 +1,95 @@ +# Copyright (C) 2020-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import shutil +import filecmp +from 
pathlib import Path +from openfl.experimental.workflow.runtime import FederatedRuntime +from openfl.experimental.workflow.notebooktools import NotebookTools + +# Define paths +NOTEBOOK_PATH = "testcase_export_federated/MNIST_Watermarking.ipynb" +ACTUAL_DIR = "testcase_export_federated/test_artifacts/actual" +EXPECTED_DIR = "testcase_export_federated/test_artifacts/expected" + +# Setup for FederatedRuntime +director_info = { + 'director_node_fqdn': 'localhost', + 'director_port': 50050, +} + +authorized_collaborators = ['Bangalore', 'Chandler'] + +# Creating an instance of FederatedRuntime +federated_runtime = FederatedRuntime( + collaborators=authorized_collaborators, + director=director_info, + notebook_path=NOTEBOOK_PATH, + tls=False # Actual testcase tls is set to false +) + +def setup_workspace(): + """Setup function to create the actual workspace for testing.""" + # Ensure the actual directory is empty + if Path(ACTUAL_DIR).exists(): + shutil.rmtree(ACTUAL_DIR) + Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) + + # Use the FederatedRuntime instance to get the parameters + notebook_path = federated_runtime.notebook_path + director_fqdn = federated_runtime.director["director_node_fqdn"] + tls = federated_runtime.tls + + # Generate workspace using NotebookTools + NotebookTools.export_federated( + notebook_path=notebook_path, + output_workspace=ACTUAL_DIR, + director_fqdn=director_fqdn, + tls=tls + ) + +def compare_files(file1, file2): + """Compare the content of two files, ignoring comment lines (lines starting with '#').""" + with open(file1, "r") as f1, open(file2, "r") as f2: + lines1 = f1.readlines() + lines2 = f2.readlines() + + # Remove comment lines (lines starting with '#') + lines1 = [line for line in lines1 if not line.startswith("#")] + lines2 = [line for line in lines2 if not line.startswith("#")] + + return lines1 == lines2 + +def compare_directories(dir1, dir2): + """Compare two directories recursively, including file content.""" + comparison = 
filecmp.dircmp(dir1, dir2) + + # Check for differences in file names or structure + if comparison.left_only or comparison.right_only: + return False + + # Compare subdirectories + for subdir in comparison.common_dirs: + if not compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): + return False + + # Compare file content for all common files + for file in comparison.common_files: + file1 = Path(dir1) / file + file2 = Path(dir2) / file + if not compare_files(file1, file2): + return False + + return True + +def test_export_federated_functionality(): + """Test that the workspace generated by NotebookTools matches the Expected Artifacts. + + This function compares the contents of the actual directory generated by + NotebookTools with the expected directory. + """ + # Compare the expected and actual directories + assert compare_directories(EXPECTED_DIR, ACTUAL_DIR), ( + "The workspace generated by NotebookTools does not match the expected. " + "Check the differences in the test_artifacts/expected and test_artifacts/actual folders." + ) \ No newline at end of file From 43faf7873e9c6af7dcd11b69cd37da94106c4be0 Mon Sep 17 00:00:00 2001 From: refai06 Date: Wed, 19 Feb 2025 10:21:40 +0530 Subject: [PATCH 5/7] Code Enhancement Signed-off-by: refai06 --- .../workflow/notebooktools/code_analyzer.py | 36 +++--- .../workflow/notebooktools/notebook_tools.py | 114 ++++++++++++------ 2 files changed, 95 insertions(+), 55 deletions(-) diff --git a/openfl/experimental/workflow/notebooktools/code_analyzer.py b/openfl/experimental/workflow/notebooktools/code_analyzer.py index 2a065c52fc..d0677cef71 100644 --- a/openfl/experimental/workflow/notebooktools/code_analyzer.py +++ b/openfl/experimental/workflow/notebooktools/code_analyzer.py @@ -17,7 +17,8 @@ class CodeAnalyzer: - """Code analysis and transformation functionality for NotebookTools + """Analyzes and process Jupyter Notebooks. 
+ Provides code extraction and transformation functionality to NotebookTools Attributes: script_path: Absolute path to python script. @@ -28,18 +29,14 @@ def __init__(self, notebook_path: Path, output_path: Path) -> None: """Initialize CodeAnalzer and process the script from notebook Args: - notebook_path (Path): The path to the Jupyter notebook that needs to be converted. + notebook_path (Path): Path to Jupyter notebook to be converted. output_path (Path): The directory where the converted Python script will be saved. """ logger.info("Converting jupter notebook to python script...") # Extract the export filename from the notebook export_filename = self.__get_exp_name(notebook_path) - if export_filename is None: - raise NameError( - "Please include `#| default_exp ` in " - "the first cell of the notebook." - ) + # Convert the notebook to a Python script and set the script path self.script_path = Path( self.__convert_to_python( @@ -51,15 +48,14 @@ def __init__(self, notebook_path: Path, output_path: Path) -> None: # Generated python script name self.script_name = self.script_path.name.split(".")[0].strip() - # Comment out flow.run() to prevent the flow from starting execution - # automatically when the script is imported. - self.__comment_flow_execution() - - # Change the runtime backend from 'ray' to 'single_process' - self.__change_runtime() + # Transform the script + self._transform_script() def __get_exp_name(self, notebook_path: Path) -> str: - """Fetch the experiment name from the Jupyter notebook. + """Extract experiment name from Jupyter notebook + Looks for '#| default_exp ' pattern in code cells + and extracts the experiment name. The name must be a valid Python identifier. + Args: notebook_path (str): Path to Jupyter notebook. 
""" @@ -94,6 +90,16 @@ def __convert_to_python(self, notebook_path: Path, output_path: Path, export_fil return Path(output_path).joinpath(export_filename).resolve() + def _transform_script(self) -> None: + """ + Transform the script by commenting out flow.run() and changing the runtime backend. + """ + # Comment out flow.run() to prevent the flow from starting execution + self.__comment_flow_execution() + + # Change the runtime backend from 'ray' to 'single_process' + self.__change_runtime() + def __comment_flow_execution(self) -> None: """Comment out lines containing '.run()' in the specified Python script""" with open(self.script_path, "r") as f: @@ -380,7 +386,7 @@ def get_flow_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: flow_class_name (str): The name of the federated flow class to retrieve. Returns: - tuple: A tuple containing the runtime instance and the flow class name. + tuple: A tuple containing the runtime instance and the flow name. """ if not hasattr(self, "exported_script_module"): self.__import_exported_script() diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py index f75cb2a6cf..ab9167105d 100644 --- a/openfl/experimental/workflow/notebooktools/notebook_tools.py +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -8,7 +8,7 @@ from logging import getLogger from pathlib import Path from shutil import copytree -from typing import Tuple +from typing import Any, Dict, Tuple from openfl.experimental.workflow.federated.plan import Plan from openfl.experimental.workflow.interface.cli.cli_helper import print_tree @@ -25,16 +25,15 @@ class NotebookTools: notebook_path: Absolute path of jupyter notebook. template_workspace_path: Path to template workspace provided with OpenFL. - output_workspace_path: Output directory for new generated workspace - (default="/tmp"). + output_workspace_path: Output directory for new generated workspace. 
+ code_analyzer: An instance of the CodeAnalyzer class for analyzing notebook code. """ def __init__(self, notebook_path: str, output_workspace: str) -> None: """Initialize a NotebookTools object. Args: - notebook_path (str): The path to the Jupyter notebook that needs to be converted. - output_workspace (str): The directory where the converted workspace will be saved - workspace + notebook_path (str): Path to Jupyter notebook to be converted. + output_workspace (str): Target directory for generated workspace """ self.notebook_path = Path(notebook_path).resolve() # Check if the Jupyter notebook exists @@ -125,18 +124,27 @@ def _generate_requirements(self) -> None: """Extracts pip libraries from exported python script and append in workspace/requirements.txt """ - requirements, line_numbers, data = self.code_analyzer.get_requirements() + try: + # Get requirements and related data from the code analyzer + requirements, line_numbers, data = self.code_analyzer.get_requirements() - requirements_filepath = str( - self.output_workspace_path.joinpath("requirements.txt").resolve() - ) + # Define the path for the requirements.txt file + requirements_filepath = str( + self.output_workspace_path.joinpath("requirements.txt").resolve() + ) + + # Write libraries found in requirements.txt + with open(requirements_filepath, "a") as f: + f.writelines(requirements) + + # Delete pip requirements from the python script to ensure it can be imported + self.code_analyzer.remove_lines(data, line_numbers) - # Write libraries found in requirements.txt - with open(requirements_filepath, "a") as f: - f.writelines(requirements) + logger.info(f"Successfully generated {requirements_filepath}") - # Delete pip requirements from the python script to ensure it can be imported - self.code_analyzer.remove_lines(data, line_numbers) + except Exception as e: + # Log error message with exception details + logger.error(f"Failed to generate requirements: {e}") def _clean_generated_workspace(self) -> None: 
""" @@ -169,56 +177,67 @@ def _generate_plan_yaml(self, director_fqdn: str = None, tls: bool = False) -> N flow_config = self.code_analyzer.fetch_flow_configuration(flow_details) # Determine the path for the plan.yaml file - plan = self.output_workspace_path.joinpath("plan", "plan.yaml").resolve() + plan_path = self.output_workspace_path.joinpath("plan", "plan.yaml").resolve() - # Initialize the YAML data - data = self._initialize_plan_yaml(plan) + # Build the complete plan configuration + data_config = self._build_plan_config(flow_config, director_fqdn, tls, plan_path) + + # Write the updated plan configuraiton to the plan.yaml file + Plan.dump(plan_path, data_config) + + def _build_plan_config( + self, flow_config: Dict[str, Any], director_fqdn: str, tls: bool, plan_path: Path + ) -> Dict[str, Any]: + """ + Build plan configuration with validation. + + Args: + flow_config: Flow configuration dictionary + director_fqdn: Director's FQDN + tls: TLS setting + plan_path: Path to plan.yaml - # Update the plan_configuration with the analyzed flow configuration - data["federated_flow"].update(flow_config["federated_flow"]) + Returns: + Dict[str, Any]: Complete plan configuration + """ + data_config = self._initialize_plan_yaml(plan_path) + data_config["federated_flow"].update(flow_config["federated_flow"]) - # Updating the aggregator address with director's hostname and tls settings in plan.yaml if director_fqdn: - network_settings = Plan.parse(plan).config["network"] - data["network"] = network_settings - data["network"]["settings"]["agg_addr"] = director_fqdn - data["network"]["settings"]["tls"] = tls + network_settings = Plan.parse(plan_path).config["network"] + data_config["network"] = network_settings + data_config["network"]["settings"]["agg_addr"] = director_fqdn + data_config["network"]["settings"]["tls"] = tls - # Write the updated plan configuraiton to the plan.yaml file - Plan.dump(plan, data) + return data_config def _generate_data_yaml(self) -> None: 
"""Generate data.yaml""" - # Get flow class_name - if not hasattr(self, "flow_class_name"): - flow_details = self._extract_flow_details() - self.flow_class_name = flow_details["flow_class_name"] - - # Get runtime information using CodeAnalyzer - runtime, flow_instance_name = self.code_analyzer.get_flow_runtime_info(self.flow_class_name) + # Get runtime information + runtime, flow_instance_name = self._get_runtime_info() # Determine the path for the data.yaml data_yaml = self.output_workspace_path.joinpath("plan", "data.yaml").resolve() # Initialize the YAML data - data = self._initialize_data_yaml(data_yaml) + data_config = self._initialize_data_yaml(data_yaml) # Initiaize runtime name runtime_name = "runtime_local" # Process aggregator information using CodeAnalyzer runtime_created = self.code_analyzer.process_aggregator( - runtime, data, flow_instance_name, runtime_name + runtime, data_config, flow_instance_name, runtime_name ) # Process collaborator information using CodeAnalyzer self.code_analyzer.process_collaborators( - runtime, data, flow_instance_name, runtime_created, runtime_name + runtime, data_config, flow_instance_name, runtime_created, runtime_name ) # Write updated data configuration to the data.yaml file - Plan.dump(data_yaml, data) + Plan.dump(data_yaml, data_config) def _extract_flow_details(self) -> str: """Extract the flow class details""" @@ -228,7 +247,22 @@ def _extract_flow_details(self) -> str: raise ValueError("Failed to extract flow class details") return flow_details - def _initialize_plan_yaml(self, plan_yaml) -> dict: + def _get_runtime_info(self) -> Tuple[object, str]: + """ + Get runtime information for the flow class. + + Returns: + Tuple[object, str]: A tuple containing the runtime and flow instance name. 
+ """ + if not hasattr(self, "flow_class_name"): + flow_details = self._extract_flow_details() + self.flow_class_name = flow_details["flow_class_name"] + + # Get runtime information using CodeAnalyzer + runtime, flow_instance_name = self.code_analyzer.get_flow_runtime_info(self.flow_class_name) + return runtime, flow_instance_name + + def _initialize_plan_yaml(self, plan_yaml: Path) -> dict: """Load or initialize the plan YAML data. Args: plan_yaml (Path): The path to the plan.yaml file. @@ -242,7 +276,7 @@ def _initialize_plan_yaml(self, plan_yaml) -> dict: data["federated_flow"] = {"settings": {}, "template": ""} return data - def _initialize_data_yaml(self, data_yaml) -> dict: + def _initialize_data_yaml(self, data_yaml: Path) -> dict: """Load or initialize the YAML data. Args: data_yaml (Path): The path to the data.yaml file. From 2d4093bee369c293b2d51808423ad2f669c7022e Mon Sep 17 00:00:00 2001 From: refai06 Date: Thu, 20 Feb 2025 12:40:56 +0530 Subject: [PATCH 6/7] Docstring & loggers update Signed-off-by: refai06 --- .../workflow/notebooktools/code_analyzer.py | 14 +++++++------- .../workflow/notebooktools/notebook_tools.py | 14 +++++++++----- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/openfl/experimental/workflow/notebooktools/code_analyzer.py b/openfl/experimental/workflow/notebooktools/code_analyzer.py index d0677cef71..d1badbfe4e 100644 --- a/openfl/experimental/workflow/notebooktools/code_analyzer.py +++ b/openfl/experimental/workflow/notebooktools/code_analyzer.py @@ -49,7 +49,7 @@ def __init__(self, notebook_path: Path, output_path: Path) -> None: self.script_name = self.script_path.name.split(".")[0].strip() # Transform the script - self._transform_script() + self.__transform_script() def __get_exp_name(self, notebook_path: Path) -> str: """Extract experiment name from Jupyter notebook @@ -90,7 +90,7 @@ def __convert_to_python(self, notebook_path: Path, output_path: Path, export_fil return 
Path(output_path).joinpath(export_filename).resolve() - def _transform_script(self) -> None: + def __transform_script(self) -> None: """ Transform the script by commenting out flow.run() and changing the runtime backend. """ @@ -380,13 +380,13 @@ def update_dictionary(args: dict, dtype: str = "args") -> None: return flow_config - def get_flow_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: - """Get federated flow class and runtime information. + def fetch_flow_runtime_info(self, flow_class_name: str) -> Tuple[object, str]: + """Fetch the federated flow class, its runtime information and flow instance name Args: - flow_class_name (str): The name of the federated flow class to retrieve. + flow_class_name (str): The name of the federated flow class. Returns: - tuple: A tuple containing the runtime instance and the flow name. + tuple: A tuple containing the runtime instance and the flow instance name. """ if not hasattr(self, "exported_script_module"): self.__import_exported_script() @@ -401,7 +401,7 @@ def _find_flow_instance_runtime(self, federated_flow_class) -> Tuple[str, object federated_flow_class: The class object of the federated flow. Returns: - tuple: A tuple containing the name of the flow instance and the runtime instance. + tuple: A tuple containing the name of the flow instance and the runtime. 
""" for t in self.available_modules_in_exported_script: tempstring = t diff --git a/openfl/experimental/workflow/notebooktools/notebook_tools.py b/openfl/experimental/workflow/notebooktools/notebook_tools.py index ab9167105d..8fde19e3b9 100644 --- a/openfl/experimental/workflow/notebooktools/notebook_tools.py +++ b/openfl/experimental/workflow/notebooktools/notebook_tools.py @@ -3,6 +3,7 @@ """Notebook Tools module.""" +import logging import shutil from importlib import import_module from logging import getLogger @@ -14,6 +15,7 @@ from openfl.experimental.workflow.interface.cli.cli_helper import print_tree from openfl.experimental.workflow.notebooktools.code_analyzer import CodeAnalyzer +logging.basicConfig(level=logging.INFO, format="%(message)s") logger = getLogger(__name__) @@ -215,7 +217,7 @@ def _generate_data_yaml(self) -> None: """Generate data.yaml""" # Get runtime information - runtime, flow_instance_name = self._get_runtime_info() + runtime, flow_instance_name = self._get_flow_runtime() # Determine the path for the data.yaml data_yaml = self.output_workspace_path.joinpath("plan", "data.yaml").resolve() @@ -247,9 +249,9 @@ def _extract_flow_details(self) -> str: raise ValueError("Failed to extract flow class details") return flow_details - def _get_runtime_info(self) -> Tuple[object, str]: + def _get_flow_runtime(self) -> Tuple[object, str]: """ - Get runtime information for the flow class. + Get the runtime and flow instance name using CodeAnalyzer Returns: Tuple[object, str]: A tuple containing the runtime and flow instance name. 
@@ -258,8 +260,10 @@ def _get_runtime_info(self) -> Tuple[object, str]: flow_details = self._extract_flow_details() self.flow_class_name = flow_details["flow_class_name"] - # Get runtime information using CodeAnalyzer - runtime, flow_instance_name = self.code_analyzer.get_flow_runtime_info(self.flow_class_name) + # Get runtime information and flow instance name using CodeAnalyzer + runtime, flow_instance_name = self.code_analyzer.fetch_flow_runtime_info( + self.flow_class_name + ) return runtime, flow_instance_name def _initialize_plan_yaml(self, plan_yaml: Path) -> dict: From 208c8f9956d567f367417730d3bf52ca7d48289c Mon Sep 17 00:00:00 2001 From: refai06 Date: Thu, 20 Feb 2025 17:54:47 +0530 Subject: [PATCH 7/7] Added NotebookTools module Testcase Signed-off-by: refai06 --- .../301_MNIST_Watermarking.ipynb | 924 ------------------ .../test_artifacts/actual/plan/data.yaml | 51 - .../test_artifacts/actual/plan/plan.yaml | 20 - .../test_artifacts/actual/requirements.txt | 6 - .../test_artifacts/actual/src/experiment.py | 664 ------------- .../test_artifacts/expected/plan/cols.yaml | 5 - .../test_artifacts/expected/plan/data.yaml | 51 - .../test_artifacts/expected/src/experiment.py | 664 ------------- .../testcase_export/test_script.py | 73 -- .../test_artifacts/actual/.workspace | 2 - .../test_artifacts/actual/plan/defaults | 2 - .../test_artifacts/actual/src/__init__.py | 2 - .../test_artifacts/actual/src/experiment.py | 380 ------- .../test_artifacts/expected/.workspace | 2 - .../test_artifacts/expected/plan/defaults | 2 - .../test_artifacts/expected/plan/plan.yaml | 25 - .../test_artifacts/expected/requirements.txt | 7 - .../test_artifacts/expected/src/__init__.py | 2 - .../testcase_export_federated/test_script.py | 95 -- .../workflow/NotebookTools/README.md | 50 + .../testcase_export/test_101_MNIST.ipynb | 344 +++++++ .../test_artifacts/expected}/.workspace | 0 .../test_artifacts/expected}/plan/cols.yaml | 0 .../test_artifacts/expected/plan/data.yaml | 8 + 
.../test_artifacts/expected}/plan/defaults | 0 .../test_artifacts/expected/plan/plan.yaml | 10 +- .../test_artifacts/expected/requirements.txt | 4 +- .../test_artifacts/expected}/src/__init__.py | 0 .../test_artifacts/expected/src/experiment.py | 228 +++++ .../testcase_export/test_script.py | 112 +++ .../test_MNIST_Watermarking.ipynb} | 86 +- .../test_artifacts/expected/.workspace | 0 .../test_artifacts/expected/plan/defaults | 0 .../test_artifacts/expected}/plan/plan.yaml | 0 .../test_artifacts/expected}/requirements.txt | 0 .../test_artifacts/expected/src/__init__.py | 0 .../test_artifacts/expected/src/experiment.py | 0 .../testcase_export_federated/test_script.py | 134 +++ 38 files changed, 883 insertions(+), 3070 deletions(-) delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace delete mode 100644 
tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py delete mode 100644 tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py create mode 100644 tests/openfl/experimental/workflow/NotebookTools/README.md create mode 100644 tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_101_MNIST.ipynb rename tests/{github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual => openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected}/.workspace (100%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual => openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected}/plan/cols.yaml (100%) create mode 100644 tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml rename tests/{github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual => 
openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected}/plan/defaults (100%) rename tests/{github => openfl}/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml (55%) rename tests/{github => openfl}/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt (53%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual => openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected}/src/__init__.py (100%) create mode 100644 tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py create mode 100644 tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_script.py rename tests/{github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb => openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_MNIST_Watermarking.ipynb} (86%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export => openfl/experimental/workflow/NotebookTools/testcase_export_federated}/test_artifacts/expected/.workspace (100%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export => openfl/experimental/workflow/NotebookTools/testcase_export_federated}/test_artifacts/expected/plan/defaults (100%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual => openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected}/plan/plan.yaml (100%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual => openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected}/requirements.txt (100%) rename tests/{github/experimental/workflow/NotebookTools/testcase_export => 
openfl/experimental/workflow/NotebookTools/testcase_export_federated}/test_artifacts/expected/src/__init__.py (100%) rename tests/{github => openfl}/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py (100%) create mode 100644 tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb b/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb deleted file mode 100644 index dcd327ed1a..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/301_MNIST_Watermarking.ipynb +++ /dev/null @@ -1,924 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "dc13070c", - "metadata": {}, - "source": [ - "# Workflow Interface 301: Watermarking\n", - "\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/intel/openfl/blob/develop/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "8f28c451", - "metadata": {}, - "source": [ - "This OpenFL Workflow Interface tutorial demonstrates Watermarking of DL Model in Federated Learning. Watermarking enables the Model owner to assert ownership rights and detect stolen model instances. \n", - "\n", - "In this tutorial we use Backdooring to embed Watermark on a DL model trained on MNIST Dataset. This involves training the DL model with both the actual training data and the backdoor (a.k.a Watermark dataset). Watermark dataset is designed by the Model owner and consists of mislabelled input and output data pairs. Watermarked model performs normally on the Target dataset but returns incorrect labels on the Watermark dataset. 
Watermark dataset needs to be hidden from the Collaborators and Watermarking embedding needs to be performed at a trusted entity (Aggregator in this case)\n", - "\n", - "This workflow demonstrates: \n", - "- Flexibility to define the Watermark embedding steps as Aggregator processing steps without any involvement of Collaborators\n", - "- Ability to define Watermark dataset as a private attribute of Aggregator entity\n", - "- Flexibility to select a subset of collaborators on which Model Training is performed every training round\n", - "- Visualize the Workflow as a Graph\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "a4394089", - "metadata": {}, - "source": [ - "# Getting Started" - ] - }, - { - "cell_type": "markdown", - "id": "ff167e44", - "metadata": {}, - "source": [ - "Initially, we start by specifying the module where cells marked with the `#| export` directive will be automatically exported. \n", - "\n", - "In the following cell, `#| default_exp experiment `indicates that the exported file will be named 'experiment'. This name can be modified based on user's requirement & preferences" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7e9a73bd", - "metadata": {}, - "outputs": [], - "source": [ - "#| default_exp experiment" - ] - }, - { - "cell_type": "markdown", - "id": "e69cdbeb", - "metadata": {}, - "source": [ - "Once we have specified the name of the module, subsequent cells of the notebook need to be *appended* by the `#| export` directive as shown below. 
User should ensure that *all* the notebook functionality required in the Federated Learning experiment is included in this directive" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "857f9995", - "metadata": {}, - "source": [ - "First we start by installing the necessary dependencies for the workflow interface" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f7475cba", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "# !pip install git+https://github.com/securefederatedai/openfl.git\n", - "!pip install -r workflow_interface_requirements.txt\n", - "!pip install torch\n", - "!pip install torchvision\n", - "!pip install matplotlib\n", - "!pip install git+https://github.com/pyviz-topics/imagen.git@master\n", - "!pip install holoviews==1.15.4\n", - "\n", - "\n", - "# Uncomment this if running in Google Colab\n", - "#!pip install -r https://raw.githubusercontent.com/intel/openfl/develop/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt\n", - "#import os\n", - "#os.environ[\"USERNAME\"] = \"colab\"" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7bd566df", - "metadata": {}, - "source": [ - "We begin with the quintessential example of a pytorch CNN model trained on the MNIST dataset. 
Let's start by defining our dataloaders, model, optimizer, and some helper functions like we would for any other deep learning experiment" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9bd8ac2d", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torch.optim as optim\n", - "import torch\n", - "import torchvision\n", - "import numpy as np\n", - "import random\n", - "import pathlib\n", - "import os\n", - "import matplotlib\n", - "import matplotlib.pyplot as plt\n", - "import PIL.Image as Image\n", - "import imagen as ig\n", - "import numbergen as ng\n", - "\n", - "random_seed = 1\n", - "torch.backends.cudnn.enabled = False\n", - "torch.manual_seed(random_seed)\n", - "\n", - "# MNIST Train and Test datasets\n", - "mnist_train = torchvision.datasets.MNIST(\n", - " \"./files/\",\n", - " train=True,\n", - " download=True,\n", - " transform=torchvision.transforms.Compose(\n", - " [\n", - " torchvision.transforms.ToTensor(),\n", - " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", - " ]\n", - " ),\n", - ")\n", - "\n", - "mnist_test = torchvision.datasets.MNIST(\n", - " \"./files/\",\n", - " train=False,\n", - " download=True,\n", - " transform=torchvision.transforms.Compose(\n", - " [\n", - " torchvision.transforms.ToTensor(),\n", - " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", - " ]\n", - " ),\n", - ")\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, dropout=0.0):\n", - " super(Net, self).__init__()\n", - " self.dropout = dropout\n", - " self.block = nn.Sequential(\n", - " nn.Conv2d(1, 32, 2),\n", - " nn.MaxPool2d(2),\n", - " nn.ReLU(),\n", - " nn.Conv2d(32, 64, 2),\n", - " nn.MaxPool2d(2),\n", - " nn.ReLU(),\n", - " nn.Conv2d(64, 128, 2),\n", - " nn.ReLU(),\n", - " )\n", - " self.fc1 = nn.Linear(128 * 5**2, 200)\n", - " self.fc2 = nn.Linear(200, 10)\n", - " self.relu = nn.ReLU()\n", - " 
self.dropout = nn.Dropout(p=dropout)\n", - "\n", - " def forward(self, x):\n", - " x = self.dropout(x)\n", - " out = self.block(x)\n", - " out = out.view(-1, 128 * 5**2)\n", - " out = self.dropout(out)\n", - " out = self.relu(self.fc1(out))\n", - " out = self.dropout(out)\n", - " out = self.fc2(out)\n", - " return F.log_softmax(out, 1)\n", - "\n", - "\n", - "def inference(network, test_loader):\n", - " network.eval()\n", - " correct = 0\n", - " with torch.no_grad():\n", - " for data, target in test_loader:\n", - " output = network(data)\n", - " pred = output.data.max(1, keepdim=True)[1]\n", - " correct += pred.eq(target.data.view_as(pred)).sum()\n", - " accuracy = float(correct / len(test_loader.dataset))\n", - " return accuracy\n", - "\n", - "\n", - "def train_model(model, optimizer, data_loader, entity, round_number, log=False):\n", - " # Helper function to train the model\n", - " train_loss = 0\n", - " log_interval = 20\n", - " model.train()\n", - " for batch_idx, (X, y) in enumerate(data_loader):\n", - " optimizer.zero_grad()\n", - "\n", - " output = model(X)\n", - " loss = F.nll_loss(output, y)\n", - " loss.backward()\n", - "\n", - " optimizer.step()\n", - "\n", - " train_loss += loss.item() * len(X)\n", - " if batch_idx % log_interval == 0 and log:\n", - " print(\"{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}\".format(\n", - " entity,\n", - " round_number,\n", - " batch_idx * len(X),\n", - " len(data_loader.dataset),\n", - " 100.0 * batch_idx / len(data_loader),\n", - " loss.item(),\n", - " )\n", - " )\n", - " train_loss /= len(data_loader.dataset)\n", - " return train_loss" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "f0c55175", - "metadata": {}, - "source": [ - "Watermark dataset consists of mislabelled (input, output) data pairs and is designed such that the model learns to exhibit an unusual prediction behavior on data points from this dataset. 
The unusual behavior can then be used to demonstrate model ownership and identify illegitimate model copies\n", - "\n", - "Let us prepare and inspect the sample Watermark dataset consisting of 100 images = 10 classes (1 for each digit) x 10 images (per class). Watermark images were generated by superimposing a unique pattern (per class) on a noisy background (10 images / class). (Reference - WAFFLE: Watermarking in Federated Learning https://arxiv.org/abs/2008.07298)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bcad2624", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "watermark_dir = \"./files/watermark-dataset/MWAFFLE/\"\n", - "\n", - "\n", - "def generate_watermark(\n", - " x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir\n", - "):\n", - " \"\"\"\n", - " Generate Watermark by superimposing a pattern on noisy background.\n", - "\n", - " Parameters\n", - " ----------\n", - " x_size: x dimension of the image\n", - " y_size: y dimension of the image\n", - " num_class: number of classes in the original dataset\n", - " num_samples_per_class: number of samples to be generated per class\n", - " img_dir: directory for saving watermark dataset\n", - "\n", - " Reference\n", - " ---------\n", - " WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298)\n", - "\n", - " \"\"\"\n", - " x_pattern = int(x_size * 2 / 3.0 - 1)\n", - " y_pattern = int(y_size * 2 / 3.0 - 1)\n", - "\n", - " np.random.seed(0)\n", - " for cls in range(num_class):\n", - " patterns = []\n", - " random_seed = 10 + cls\n", - " patterns.append(\n", - " ig.Line(\n", - " xdensity=x_pattern,\n", - " ydensity=y_pattern,\n", - " thickness=0.001,\n", - " orientation=np.pi * ng.UniformRandom(seed=random_seed),\n", - " x=ng.UniformRandom(seed=random_seed) - 0.5,\n", - " y=ng.UniformRandom(seed=random_seed) - 0.5,\n", - " scale=0.8,\n", - " )\n", - " )\n", - " patterns.append(\n", - " ig.Arc(\n", - " 
xdensity=x_pattern,\n", - " ydensity=y_pattern,\n", - " thickness=0.001,\n", - " orientation=np.pi * ng.UniformRandom(seed=random_seed),\n", - " x=ng.UniformRandom(seed=random_seed) - 0.5,\n", - " y=ng.UniformRandom(seed=random_seed) - 0.5,\n", - " size=0.33,\n", - " )\n", - " )\n", - "\n", - " pat = np.zeros((x_pattern, y_pattern))\n", - " for i in range(6):\n", - " j = np.random.randint(len(patterns))\n", - " pat += patterns[j]()\n", - " res = pat > 0.5\n", - " pat = res.astype(int)\n", - "\n", - " x_offset = np.random.randint(x_size - x_pattern + 1)\n", - " y_offset = np.random.randint(y_size - y_pattern + 1)\n", - "\n", - " for i in range(num_samples_per_class):\n", - " base = np.random.rand(x_size, y_size)\n", - " # base = np.zeros((x_input, y_input))\n", - " base[\n", - " x_offset : x_offset + pat.shape[0],\n", - " y_offset : y_offset + pat.shape[1],\n", - " ] += pat\n", - " d = np.ones((x_size, x_size))\n", - " img = np.minimum(base, d)\n", - " if not os.path.exists(img_dir + str(cls) + \"/\"):\n", - " os.makedirs(img_dir + str(cls) + \"/\")\n", - " plt.imsave(\n", - " img_dir + str(cls) + \"/wm_\" + str(i + 1) + \".png\",\n", - " img,\n", - " cmap=matplotlib.cm.gray,\n", - " )\n", - "\n", - "\n", - "# If the Watermark dataset does not exist, generate and save the Watermark images\n", - "watermark_path = pathlib.Path(watermark_dir)\n", - "if watermark_path.exists() and watermark_path.is_dir():\n", - " print(\n", - " f\"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... \"\n", - " )\n", - " pass\n", - "else:\n", - " print(f\"Generating Watermark dataset... 
\")\n", - " generate_watermark()\n", - "\n", - "\n", - "class WatermarkDataset(torch.utils.data.Dataset):\n", - " def __init__(self, images_dir, label_dir=None, transforms=None):\n", - " self.images_dir = os.path.abspath(images_dir)\n", - " self.image_paths = [\n", - " os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir)\n", - " ]\n", - " self.label_paths = label_dir\n", - " self.transform = transforms\n", - " temp = []\n", - "\n", - " # Recursively counting total number of images in the directory\n", - " for image_path in self.image_paths:\n", - " for path in os.walk(image_path):\n", - " if len(path) <= 1:\n", - " continue\n", - " path = path[2]\n", - " for im_n in [image_path + \"/\" + p for p in path]:\n", - " temp.append(im_n)\n", - " self.image_paths = temp\n", - "\n", - " if len(self.image_paths) == 0:\n", - " raise Exception(f\"No file(s) found under {images_dir}\")\n", - "\n", - " def __len__(self):\n", - " return len(self.image_paths)\n", - "\n", - " def __getitem__(self, idx):\n", - " image_filepath = self.image_paths[idx]\n", - " image = Image.open(image_filepath)\n", - " image = image.convert(\"RGB\")\n", - " image = self.transform(image)\n", - " label = int(image_filepath.split(\"/\")[-2])\n", - "\n", - " return image, label\n", - "\n", - "\n", - "def get_watermark_transforms():\n", - " return torchvision.transforms.Compose(\n", - " [\n", - " torchvision.transforms.Grayscale(),\n", - " torchvision.transforms.Resize(28),\n", - " torchvision.transforms.ToTensor(),\n", - " torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize\n", - " ]\n", - " )\n", - "\n", - "\n", - "watermark_data = WatermarkDataset(\n", - " images_dir=watermark_dir,\n", - " transforms=get_watermark_transforms(),\n", - ")\n", - "\n", - "# Set display_watermark to True to display the Watermark dataset\n", - "display_watermark = True\n", - "if display_watermark:\n", - " # Inspect and plot the Watermark Images\n", - " wm_images = np.empty((100, 28, 
28))\n", - " wm_labels = np.empty([100, 1], dtype=int)\n", - "\n", - " for i in range(len(watermark_data)):\n", - " img, label = watermark_data[i]\n", - " wm_labels[label * 10 + i % 10] = label\n", - " wm_images[label * 10 + i % 10, :, :] = img.numpy()\n", - "\n", - " fig = plt.figure(figsize=(120, 120))\n", - " for i in range(100):\n", - " plt.subplot(10, 10, i + 1)\n", - " plt.imshow(wm_images[i], interpolation=\"none\")\n", - " plt.title(\"Label: {}\".format(wm_labels[i]), fontsize=80)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d82d34fd", - "metadata": {}, - "source": [ - "Next we import the `FLSpec`, `LocalRuntime`, placement decorators (`aggregator/collaborator`), and `InspectFlow`.\n", - "\n", - "- `FLSpec` – Defines the flow specification. User defined flows are subclasses of this.\n", - "- `Runtime` – Defines where the flow runs, infrastructure for task transitions (how information gets sent). The `LocalRuntime` runs the flow on a single node.\n", - "- `aggregator/collaborator` - placement decorators that define where the task will be assigned\n", - "- `InspectFlow` – Utility to visualize the User-defined workflow as a Graph (only currently compatible in flows without loops)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "89cf4866", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "from copy import deepcopy\n", - "\n", - "from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator\n", - "from openfl.experimental.workflow.runtime import LocalRuntime\n", - "from openfl.experimental.workflow.placement import aggregator, collaborator\n", - "from openfl.experimental.workflow.utilities.ui import InspectFlow\n", - "\n", - "\n", - "def FedAvg(agg_model, models, weights=None):\n", - " state_dicts = [model.state_dict() for model in models]\n", - " state_dict = agg_model.state_dict()\n", - " for key in models[0].state_dict():\n", - " state_dict[key] = 
torch.from_numpy(np.average([state[key].numpy() for state in state_dicts],\n", - " axis=0, \n", - " weights=weights))\n", - " \n", - " agg_model.load_state_dict(state_dict)\n", - " return agg_model" - ] - }, - { - "attachments": { - "image.png": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiQAAAHCCAYAAADfBtJyAADpl0lEQVR4nOxdB3xjxfFe9WLJvdfzna83jqMTem+hd0JCC5B/ICEhQEgjEFoSSGgJPaG3QOglJPR6vVf33m1Zlq2u/3yr93yyLN/ZPvvkMt/93kl+emXfvtnZb2ZnZ/WhUEgwGIzhgdqNZsXm0n2e39J1wae13YfVd7lzW1zejHiXi7HnoBGaUJbN1JSbaKo/Nt/4n3PnZ760aNb0dfEuF4MxUaGPdwEYjImGru7uxL+88811f93s+6nD7UsiTq+Jd5kY8UBI1Hf15mJbWSeWPrm+49KfHtj11+sOn/8Xo8HgjXfpGIyJBiYkDMYw4PP5DPd8tOHnd6113+QNBI3xLg9jfACktNHly/7tx7W3Wry9vVcfu8/fDQaDL97lYjAmEpiQMBjDwNfrtx74tzWdP2IywogFyMVdq503HT6v6ZNFxfk8fMNgDANMSBiMYeD5Ms8FrS5verzLwRi/aHC6c/69pvp0JiQMxvDAhITBGAa+qmw7KN5lYIx/fLm97uB4l4HBmGhgQsJgDAO1nb35gmNYGbtAV68vMd5lYDAmGpiQMBjDQDh2RBfvYjDGOTAtPN5lYDAmGpiQMBgMBoPBiDuYkDAYUwQWvU4cWZIu5mfZ5d9dHr/4uLRFbGt1iX0LksXh0weP1Q2GQuJvX1eKHl+gb9+++XTOjHThcPvEo99W9Tu+MMkijpmVIVIsRqHVCBGg8x29PvFZRZvYTvdDOsaZaQni9AU5g94zRP+eX10n6rrcu/fgDAZjQoAJCYMxBTArPUE8dOoicXBxqrAYwkNOgWBIVHX0iMMf+UocVpwm7j5x3qDn+4NB8dTKmj5Cgmvce/J88R06zxsIiq8q28WGJmff8fsXpogHTl3Yd6+Qcr8uIi+PE3n5w8fbJTHa2T1x/BcV7UxIGIwpAiYkDMYUwDUHFYujZmaIeurcr31jlWjs9oi5GTZRlGIRLS6PeG5NnfiCSAWwd16y+Msp84XHHxQXv0THOj2SUHT27sjzdfysDLE0P1keY9BpxDUHF4tr39wg/wbgFdHTf/CKnPHMcrnv3MW54pJ9CsW135kuvq3pFJ+Ut4qDHvpc/pZpM4kXL1wqmcv172wUq+oc8p4bI0gOg8GY3GBCwmBMARQmW4WGSEKvNyBW1nVKQvJVVXvf7yAq9YonwqzXiWAo7KEAMajq7O13LbNeKy5aki9M9Pn8qlpx2Ix0cczMTJGXaBbl7T39jvXTNZbVdIhuuu+aeoc4uiRDTE9LEHvnJonXNjaIr6s75HEFSRZ5TyyttYlIiLqfwWBMHTAhYTCmAN7c1CiOm50pSjISxPqfHSE+2Nos3t7SJJ5fUyt8geEtsDk73SaOnZUp2lxe8eiyKoH5JBftXSBOn5ct7vmiPOY5GLo5bX62yLSbMANF1Hb1xjyOwWBMXTAhYTCmAP65qkYGsV6+X6E4qiRDnLUoV5yxMEdcf+gMcfrTy0Vpm2tI1zHqtHLYxWbSi39vaBDLajrFgiy7+B4RklMX5IinVtWK1p4d68qlJhhF6+9OkN/1Oo3QajTi1XX14nUiSAwGgxEJJiQMxhQAhl9eWV8v3t/WLGakWsUFe+WLy/cvEguyE8Vl+xaKmz/YLIdLdoXCZIs4fnam/H7S3Cyx9rrDhc0YViOLc5LkzJv36B4qfIGgnFmzJDdJpBM5WVnbKX729kYZ
l8JgMBiRYELCYEwBpFoMctikudsj1jR0iVpHqZwJc+j0NJFrN8kA1KEM3RxE5xQToXF5A3Qtr5wOjGm/CUadSKZ7nDQnqx8hcbr94synl4nT5ueIh05fJANmEX/y589KhS84vKEiBoMxucGEhMGYArjrhHniyBnpYlltJ5EJPxEUo5wlA0KxvtEpAsFdX8NApOXifQqEQa8Vzy2vFlf9e52c8gv85shZ4pZjZ4vj52QK2/v91Qpox7/WN4gDi1LE1QcWi58fOkN8U90uPi5vG4MnZTAYExVMSBiMSQ5MwS1rc0lCcOaCHGEkQoHZL9tbusWzq2rFg19XSGKyK2BIZmF2ovD6gzJI1hvBYlYQ0en2BORMm6NmDEyw1usPiD99WiZOnJ0lilKt4o7j54kTnvxGdLp9A45lMBhTE0xIGIxJDoyM/OmzMvG3byplUCqm/4J/gFBg6CWajHxR1S7ybv/PgNwjaxu7xLx7Ppbfu6KIxIelLaL47v/KZQfhgcG5OX8IXwP3ACo7esTi+z6VeUsQ0+L0+PvOR/Kzwjs+DF/bwySFwZiKYELCYEwBgHREEoCdAYGobREzZXa1H4DHpT3qN4+//98gJ45BPCIo32DXZjAYUwNMSBgMBoPBYMQdTEgYDAaDwWDEHUxIGAwGg8FgxB1MSBiMSQ67SS/2yUsWK+o6hxxHMtbAOjgo0/Lazn6zdRgMxtQFExIGYxIDM2rOXZQrbjlmtrj6tXXirS1N8S6SRLLZIP544lxx6lPL+6WaZzAYUxdMSBiMSQyrQSfOIULy8tp6uXbNO1ub5YwWZFY9Y36OTI6Gxe4+K28T/97UKDO6XrR3vpiemiCPw3mY7vu9JXlidqZdzrR5blWtaOr2iEv3LRTZdrPY1tItHlteJTO9lqRaxcX7FEqvjNPtE08srxbtPT5xxf5FMu18Q5dbPPR1pSwbpiD/4bg5osbhFo99WyXoT3H1AdOEncjKt1Ud4pUN9cJG5fz+0gIxLcUqenwB8cSyalHR0bPzh2YwGBMSTEgYjEmMJTlJMg/J48uqxP2nLhRLc5PE8rpOccW+RWJRTqK48t9rxalzs2XKdxCSO4+fK8rbe8T172wUNxxWIg6eliLK2l3iUiIZpzy1TLQRubAYtOKJs/YSa+sc4s6Pt4t7T54vh4I+3N4iHjhtobj3szLxSXm7eObcJaIkLUF87GgV/yBi4iJC8egZi8QFi3PFG5ub5FThX3+wRXpIEog4vXLRPuIZIjsfbGsWD1BZPf6AWEdk6LR52eKMZ1YIJ/KbcLZ5BmPSggkJgzFJAQ/EudT5P72qRmxtdYl1DdS5z8+WHo8T5mSKe4g4RK5fY6LjD5+RLv702bcycVksSO+KQS8OKkoVq4mQYIE9EIvDp6fJdXISjHrx39JWoUN6WAUzUhPEHUR02nq9YlaGTdR3uQdcN9VqFNNTreK9rc0yeyvIzekLcsS21h2rEA9WJgaDMTnAhITBmKSYkWYVhxanCZNBJ/YtSBGFSRYxP8su7vykVAaSmvXafsejuwe5AJHZGUBK3Ir3otbhFstrOkVHr08SCp1G04+MABfslUckpFdc/+5m8cvDS+i+ugHXBNnA/ZHFFcBCgPC6BNglwmBMGTAhYTAmKQ6Zlia9Ij98da38O8tmEu9esr/YNz9ZvLmxUZy/V74oa+8R3ylOlSQCJOWdTU3iB0sLxHOra8U+dNxXle0Drotsq6+tbxCHEdl5bk2dAIfAvs3NTlHT2Su+v3eB2N7aLQqSzfL4RqdHroGzgMhQktkgerwBSWgaaP/B8LQ0OCShQRzLZfsWio/LWuVCgPd/US5jVhgMxtQAExIGYxhIMGpdLo9IiHc5hgLEfnxdvYNQIDX7Te9vFg6PTzy1qlZUEnnAEE0wEOrzRNzyv63ixNmZ4mAiKW5lnZtu+rzn8zL5CfiCIXHb/7aJ4+i4w6anCY8/KOocbtHi8opr39wgvjsvS8zJtMtF+OD3wJAR
4kQOnpYqtrR0i2U1HURg/OLnb28Ux9M1Ei168fbmJvEz+vu0BeFAWwwnfUPHJZn14oGvKkSvLxCXOmQwGHsOTEgYjGFgWmpCZXNDT2a8yzEU/K+0td/fGI5BbAZw8uwssXd+knATaViUmyRu/mCz3H/B4jyRYTcJg1YjshJN4sV36yUZeGV9Q79rIUD1tQ3992GGzg+W5gv4NDBLB/EfX1V1yHu8GnUsgNkyf/+mst8+eGYiAeLy742NI3n8uCIvxVYX7zIwGBMNTEgYjGHguLlZH6xorNwnGArtPNBinOOTijaxssEhV+fFDJxOdzhh2r+IOFiN4RiP+74sF13uoSdSA3l4YkWNTHoGhwuGcUBGpho0GhE6ZmHRh/EuB4Mx0cCEhMEYBs6blfTiS6v0527r9M2Kd1l2B91ev9yigRkunYOsyLsrBHjFXolF2YnrTpid9V68y8FgTDQwIWEwhoF5xYWbfnOI47Yff1j7oMPtS4p3eRjjC8kWQ+cfj8i/oSg3uyreZWEwJhqYkDAYw8T5B857IS3J1vbT9yv+ur2te2YoJDS7Posx2bFPfvKKPx5RcMPh86d9otFoeL4ygzFMMCFhMIYJnU4XOGFh8XuHzMj6/ON1ZUd8um77Yd1ujy3e5YoHNFqd0Z+Qur856Nnqc3U1x7s88UBSgtlx7N7z/nPA7MJvEqwW167PYDAYscCEhMEYIWxWa/cpByx8C1u8yxIvtLe3p69bt+619PTsRxcsWPB+vMvDYDAmLpiQMBiMESMUCmkCgYAlNMFnHTEYjPiDCQmDwRgxiIzoaTObTKbueJeFwWBMbDAhYTAYIwY8JLTpCTzfl8Fg7BaYkDAYDAaDwYg7mJAwGAwGg8GIO5iQMBgMBoPBiDuYkDAYjBHD7/cjqNXCQa0MBmN3wYSEwWCMGJjuy0GtDAZjNMCEhMFgMBgMRtzBhITBYAwLmOrb29trxafP57PSLo3f77e6XC4b1nAxmUxupNePdzkZDMbEAhMSBoMxLHg8HtO2bdv+0N3dPQfxI16vN3X79u336/V6F22d8+fP/7Hdbm+LdzkZDMbEAhMSBoMxLJhMJg8RkS3Nzc3/FwwGDdjX2tp6CH2EiouL72YywmAwRgImJAwGY1jAsMzs2bNfbGhouN7tdpeo+4mIbM3Ozn4inmVjMBgTF0xIGAzGsGGz2RwZGRl/q6uru1vxkoSSkpKeys3NLYt32RgMxsQEExIGgzEiFBcXv9zR0XFRd3f33omJieunT5/+HLwn8S4Xg8GYmGBCwmAwRoTMzMy6jIyMh4mQPJKVlfUX+l4b7zIxGIyJCyYkDAZjxCgqKnozEAgcsnDhwmfYO8JgMHYHTEgYjBEAOThKq2tLXtvYdMZnle2H1rS7Cjp7vcnxLteehkar1ZotNkvvG69UxLssexomvc5TkJJQszAvZf2Z0xNePXDRnK8NBoMv3uViMCYqmJAwGMOE2+02v/z56nNuXd3z24r2nuJgKKQVQke/WOJdtPjAiz7YkhrvYuxx+IUobfCVfNzQfMRzm4wXfq+085kbjl7wx5y0lIZ4F43BmIhgQsJgDAPwjDz7yaqLrvm8/QG3P2iOd3kY4wNtPd60+1f7rq3ylhY9d9bCCy0Wc2+8y8RgTDQwIWEwhoE1lfV73bba9RsmI4xowFP27w0Npz9daL74h4cueJRjahiM4YEJCYMxDDy+qvHy6s7ewniXgzF+8cia9itPmd3yVm52Zn28y8JgTCQwIWEwhoEvK9oOjncZGOMb21tdM1vb29OZkDAYwwMTEgZjGKjqcBWFA1gZjNjo9vptHq/PFO9yMBgTDUxIGIxhwBcQhniXgcFgMCYjmJAwGAwGg8GIO5iQMBgTAMlmg3j30v1FYbJVGHQaua/F5RWflLaKx5dXizUNDqHTaMTdJ8wVZyzMFQnG8LBSry8oKjtc4vFl1eLNTY2iy+MXBq1GvPmD/cTeeckD7tPS7RUn//NbOqdnwG8v
nLe3OHJmxqBlXFbTIc57fqU4e0GuuOnImbLMVCThC4REQ5db/Gt9vXhmda2op++RyLaZxD/PWSKW5CWJf66oETe/v1kEQuEJKnimh05bKE5fkCOeW1UrbnhvkzDqtGLjz48Qgg655cMt4qlVOzLW/2Bpgbj7xHniy8p2cdGLq0SPLzDsumYwGPEBExIGYwIAnfCinERkBxWbmpzCScSiMNkirj5omjhxbpY44+llYgPtn5VhE8WpVtnpg1TYjHpxYFGqOKAwVTyxrEpc+9YGuppGpFiMIiPBJMrbXaLR6em7T3uPlwhEMGYZ6uia21tdOF0UJFnk/VGODY1OTHkVNZ1uAR5RQPtnpicIlzcgfwOBQrnuILJ03KwMccYzK0Sne0dC0+/OzRaHz0inZ9OKMxfmiKdWVotNzd3hH+leiWa9yCTSYjfp8afcMhKMRLr04o8nzZek618bGmQZLAadPDbZYhCasXsdDAZjDMCEhMGYQOiijvyKV9eIZTWdYv/8ZPHChfuIohSLOGF25o5OnPDSmjrpTUikTvzXR80S1xw8XZy2IEfc/2UFkZCw9wNeiLs/KRWPEVEZCq5/d5P8REd/85Ezxa3HziHC0SWOfPQr4fYH+35Tsb6hSxz12FdCq9GIC5fki3tPni+Jx4GFKeK9bc3yGBCIi/cpEDqtRlRQuUCmTp6TTc9SOqQygXzc990FotbRK76u7hjSOQwGY3yCCQmDMUGxuaVbdPb6xLQUq0g0x/YItNPva+odIhgMSS+L3bTnZwhh2GRVXaf8tBFBSrHsiAs+kggKPD/1Dre4mQjPE+cukV6Sx5dXybLvDH56JicRtOxEs3jo9EXilH8uG+tHYTAYYwgmJAzGBIJZrxNnL8qTQzDnUMe9ODdReAJB8XVVe1/cBbBXXpL40YHFIp8668v3LxJGvVZsrnGKjRFeFL1WIx4+Y5H4G3XmKn753ibx0NeV4qQ5mSLbHk5Gi6GQzyvaxPpG564LGMGKcujeVx8wTcazXLy0QHozmrs94vPK9vChdOyV+xVJkvKP5dXitU2N4lZHr5iXZReHFqeJ1+nvncFPz/2LdzaJ8/fKk7EtD5++kMrZPpRqZDAY4xBMSBiMCQQrde7XHzpDfnd5/WJtfZd4fFmVeH9bS7/jjpiRLjcANOWNjY3il+9vEr2Yt6zVhvfTD4jxqOrcsewKhk1sdI+fHTJDxp4AiCn56Zvrh0ZIIoDhl3tPWdD3N4ZwrqXr1ClBrfMz7OIoIhJuKhO8IcfNyhSt3V5Rkm4TP9incJeEBOjs9YqfvLlBvHXJ/uIYOn8WnctgMCYmmJAwGBMICCL95XubxZYWp+jxBkQ1kQm1g4fHQ8Wr6+vFe1ubxe+PmSNyk8wy9qTO0X92CzwqD35VMSCGxKDTEnHYIJLMYfUQDCH7aLcYEiJWb9lIBOam9zaJu06cJ70eIfqHoFh4XBBXcsq8LEmwgFuOmd3vMkfMSBNZNpNo7fHu8nYbm53SswNPz8wMJiQMxkQFExIGYwIB3orltR0yqHVnqO7oFU+vrJEjKI+cuVicsziPzuuUBCQSFr1WzmLpA/Xwvf6gWFG78+sPBQ4iQf8tbRFXv7ZWvPq9fcWC7EQiSLPF/72xXk4JPn1Brjzu2VW14uPyVvk9hfb/7NAZIsduFifNyRJPr6oZ0r1eWd8g5mXaxa+PniXJDoPBmHhgQsJgTFLAe/Ds6lpx2rwccdK8LPGrI2fJ/Bzq0As8KvedulBuKuAN+f6LK8Wza+pGrRyIGXnkm0rxq6Nmi0v2LRQfbm+RwbjIO9Ll9os/frq9X5kOKU4Vp87PEd+lMr+6oWFI94DX5a9flosDilLEsbMyR63sDAZjz4EJCYMxAeD2B8RzRC6QZKzVFXsYAzEh/ytrFW3U2cMbAnKB6bg3f7BJ1DndMh8Ihk7WNXaJd7Y0yVk6A68R6psWvDOsa+gST62sEeVtPf2CaYG1ym9lba6+3/5GhASBtZl2
syhKsQqb0SvJUgUdU9G+I4YFM2eQHK2DiEpHj1eW+bPyNuGh5/6qql0+E47BufCERMa/dNBz/+4/W0S90yO2NDvlcQwGY+KACQmDMQGADKtXvrZup8eg87/vi/IB+9c1Ounctf323fa/bSMuC7r5tzY3yS3Wb29ubpRbJBqIJNz43uZ++55YUR3z+ghmjQxoffjbKrmpCBA5uWqQuvimppO21UN8EgaDMZ7AhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwZjEwAq6tx87R2xu7hZPrqwWoXEy8eSCxXkyAPaFtaM3vZjBYExsMCFhMCYxFmcnilPmZYu9cnvlVN/Gbo/cH15oTy+Q3NUbCMkMsMjlYSYCg7VlkFrM4w/K/TjWphyLacTdtM9s0Mk1arBoH2YAYYot8pEl0H6s4AuygUyyWFDPSn+rGVkdbr9M7laUbBEJdM2MBKPopuOQ0h7Xw7kBuhaOU8uTYAzfG9dy0bEMBmNyggkJgzFJgU789AU54q+fl4sjS9LFIcVp4pX19UJHzOH3R88SGTaTTD1/+PQ08cPX1onKjh7xz7P3knk8QEaW5CaKs55dKc5blCNOmpstVtR1im0t3eKjslbx2BmLRXO3V6QTofiqqkPc+0WZmJNuE3efME+sqneI6WlW0ehwixve3ywOp/vunZ8sFmTZxeZmp7jjk+1yEb59C5LFFfsXiQ+3tchF9x48daFMA1+UbBVvb24Uz62pE9cfMkPMpfOwf3WdQ3xAxwbHi5uHwWCMKpiQMBiTFCVpCeLAwhTx0FcVcgG7s4icvLaxQZwyJ0vMzrCJi15aLT0bWDcGQzs/PrBYNLu8Ml9IhtUoXv3ePpLUpND3lURGbv9ou/SC/OqImfK4n7y9QXo6Xjx/qXj420q5Hg0IzzOra8W5i3LFkcrifkjAlpVolnlSzqP9f0GuFOIUnxCxufOTUnnNO46bK5bVdoo76B6z0hPEU+cukVla04jwfFTaOmjOEgaDMXnAhITBmKQ4Ynq6JBzzsu3CbNSJ+fS5NDdJZNpMMhMqhkAwHANgiKaAyEWtwy2HVAaDlo7MT7KIhdmJ0huCVO/VdA6GZeAtURf6U2Ez6sUzRC6Q2Ozb6g5xydKCAWvN4O8cu0l8Wt4mSQuGgLAicR6RGAaDMXXAhITBmIRA7MXJ87LEf7a3yCEaDM2UtfeIo2dmiFV1DnHK3CyRZjXKGBAddf4YBUG69QOLUmUciBozEg0MlyBAFnEev/pgsyQ1ero+iEQN3WNupk18QsQC8SlAqsVAm1GmfUesijrcAqJkU45BGbAKMNLagyBhYb1ur1/URK1OzGAwJjeYkDAYkxDwYICIYMVfNZDVRkTjmoOLxX1floeHck5bKOocvcJuDAehvri2XhxEhOQB2t/j8fd5MkA+1HVh8P+TK6rFfacsEI+cvli09njFyppO8cyaWnHv5+XilqNnEymxi1y7SbTRb/VOt1ydGN6U9l6fXIcHpOSj8lbxh2PniHtOni/e2tgor/nnk+aLB05dIFf8/eMnpURggsJPW/RaOQwGY3KCCQmDMQxk2ozNFc5AcbzLsStgcb0T//Ftv30vra+XG7wnD39TKRqIqBxWnCb2ykkSDrePyEZI3PDeJkkaLlqSL4dmfERE7o1aHwczby7915oB92wg8nHxy6uk1+TRMxbLRfpAZL7/ysBjV9Y5xAlR5bvwxVUDjvtF1Po3EwF6rcav02l5OhCDMUwwIWEwhoH52UkbK5zt456Q7AwpFoO4dN8iGfOB1X1veHeTqO9yixlpCeKK/Yrkqrwef0Bc9/ZGOR13qFiSmySOnZUpdFpNOO/JFA1EJSJXm2S3O+JdDgZjooEJCYMxDJwzL/3lj6scR7i8gYR4l2WkwMq7t380cLXf0jaXuPG9TSO+LuJVsE11HFFg/TgnK6Mh3uVgMCYamJAwGMPAGXsVvvbBlvrjnt/WfUEoJGKE
fTKmMnITzfU3H1Fyh9Vi6Yl3WRiMiQYmJAzGMJBgtbpuP3Hhrzo0ZSnvbmk+Md7lYYwfgIz8/bjCq2fkZJTFuywMxkQEExIGY5goys6oev2i5NP++cWmHzyxqeuyNfWOvTz+oCne5dqTsGiFSDWGRIpBCIM2JDxBITq8GtHu08jvUwUajQgVp1grTipOeOcXh8/6U35Gaq1Go+FpQQzGCMCEhMEYAQwGg++ywxY+ceZeXa92OhzJwWBIG+8yjSZCImRyulxpDoczzdHlTDNZLDl6k3mWw9k93eF0zQj6/Yl2i6HNrNf3uFzd0212+3aXx5fkdPtSDXq9I9FuK0uyJ5Q5OzvLNCJYn2S3tycl2tusFnO7QW+YVMMZCQkWV1pKSpvRaPTGuywMxkQGExIGY4TQarXB1JTkdmzxLstw4ff79b29vQk9PT1yCwaDGd3d3TPa2tpmdnR0zLBardkWi8UYCgYNZm3Q6GiudyUnJ28vSE5+d35hdpnb7a5JTEzspHNTS0tL39l/yaJz6fwQbSlmsznP4XCUdHa2T/d3O45MSUlJFD6319nh9dZUdPq8Xm9rWlrattTU1DKTyVRGxzfRvVx0Txd99lDH7ol3/TAYjD0PJiQMxiREKBTSBAIBHYgHPol82IlozGptbZ3R0tJSQuSiSKPRZHg8nkwiFZl0XMBms5XZ7fYy+lzrcrneIiLRQMShgT4bExISumLdp6KiYiYRsxCdU5+dnd0W6xi6t629vT2Htmy6ZzZdr4j2lRD52d/pdE7X6XRWIiItRE5aaH8r7avLzMwsTU9PL6NylhPxadDr9X46LoANRHBsa4/BYMQDTEgYjEkAeDuowy9oamqaTqSjkEjINOr4i4lYFBPhmEZ/m61WazURiwp8dnZ2riMCUZaXl1dJpKMqKSmpdazKRmSjm+6zHdsgZbcTSZpGWzF9L8zNzZ1GpOOIhoaGS8rLy/N9Pl8iyBKVu4oITSU9XyURFpS7lkhLFQjTWJWdwWDsOTAhYTAmADwej7mjoyObtkzF01Cg1+tLuru7ix0Ox+xQKJRMnXaryWRqS0xMbKutra0jIvBVVlbWM9R5b6P9zYh70el0ftXbEO9nUkHldBYWFq7Hhr/h3SESYlC8O3p69iR6ZnhUQLam07PsTb8fW11dnVJaWprqdDrNimenij630bGlycnJzURUmuHhoX3tHGjKYIx/MCFhMMYBqPM1UMea1NXVlUIkI4k60Mze3t5ZnZ2dM2grNhqNOQkJCX7aT310wEcdcxd1uGWpqan/LSgoeJg68Vqr1dpNx3Tj84ADDpiwK9OBPCBAVA0SJULRlZ6eXkNfP8bfGIJyuVx2xL7AM0TkJJVIC+ppGpGU6fT70VqtVkdETVdeXq5vbm72g6xg+IfqawsdW0N11EnX7UxKSmpHffEwEIMRfzAhYTD2ANCJer1ek9vtNtNmoY40mT4RRIqYjlnUMebp9XobERM7OlkEiFIHui0xMbGMtnep062gv1vpuDb6bCHi4Yz3M8UL8O4goBZbxO5v1C/wrBCxS6Mttb29PZPqKtNsNk+jv6eVlpZeSnWZS+d6DQaDk+q8u6mpyWEymWqysrI2EWGpICJUTX930zm9tLnpOC8TFgZj7MGEhMEYZWDIgciGFQGkZJ2XUIdXQp3k9LS0tDwiG3lk2RcGg0ETdZRl2EpKSrbTsWstFkttRkZGdUpKSj3IR7yfY6ICQ1JELJqwTZs2bcDqfAphyaQ6zyMyWGS32/Oys7NBWM4gwlKCd0QkxEHvppIISQORmgb6u4wIyzZ6PxX0HiuIsPTG49kYjMkMJiQMxggAb0dbW1sBbbnUsRWQ1V5EhKLE5XJN7+7unkm/p1JnVm+1Wmsw3FBbW1tNJGRZQUFBKXV+26kTbJoMVrcSj9Lr8XjsNpst5iyb8QaFsNRjmzVr1vLo3+HN6uzszGtsbJxNhGU6kctpRET26ejoOL2+vj6P/s4FUaF3W07PXEkE
ppTOqSKiUkdbPW01PHWZwRg+mJAwGDEAK5o6pUza0h0ORxp1ujm0byZ9n0GW9HSNRoOYDrj8XUQ0uhsaGlqoIypNSUn5vKioaAtZ1LVw96NjImvavd9++03KpFmI96CNqsZvjHdZRgsYEqJ3WY1N3UcE04jAYnXYjb4XIWcLbfhcQCTzUCKmdiIxNiKlNpKJZiKdFUlJSWUkC6VUP3WJiYntJB8tmNHEhIXBGAgmJIwpCSU3B2I1EmmzUyeTEgqFiqlTmYPEYNTx5GZlZRlpn5Y2He3zEQHZRp3JdupUPtRqtcjZ0UkWchdtjqkc0zEVEBlkq6Bm+vTpX6h/QJaIqCYTGUnEJ8lHfk9Pj5wZRKT2ECIjNtofqKurC5BsBZxOZxPJEYJsSzFDiGSsGcG12EienIhbicNjMhhxBRMSxqRE5NRRzGChDgPEYxqmjTY1Nc0hQlKUkZGRQpZuBnUQafSbHjED1HFsLykpWedwON6mvxvT09MRz9HCU0cZOwMyzWKjr3WxfsfMKSK1WcqWSaSjSKfTYRrzmeXl5TPou5nkrR3TtonMtDc3N2Oor0zdkDQOQ020+cbbtG0GY7TAhIQxKUDkw4jgUbJIC0nJIwsp0p9PIyICS7WIiEcaKfwqJNdCHEdNTc0aOq2GvlcgyRYRkebJENPBGJ+AFw0bydu2WL+DsLS3txe2tLRAfgtJVqcbjcbDGxoaflBaWlpMBFuPhHbYiPhUNzY2IiEcEsRVELGuJMLcwoSZMdHBhIQxIQBvR2dnZzZmRsDKJGWdR7tnOp3OmS6Xa5rX6y0gJd5A+1vJgmwjRV5DyvqrtLS0ytmzZ29PSkqqVNOOw8JcsmQJkw/GuIFCWPolh8OworqRjCcjfokIy4za2lrM2ppL+w+vqKjI3LJlSybJv5bkv5yICQJsK4iIYzZQQ2pqagPJPpLEtcT7GRmMXYEJCWNcAEMrsBKRO4IUb2pPT08GkQuMv89EcjCPx5Obnp4eIkLhJmLhIcLRS5bidlK4H+Tm5iJ3RCVtXVicTd3i/UwMxkgBb4cyROPH31artScjI6O+pKTkc/ytxkApyeGstCuV2g48hCVE2KfT8d+hv03UdhCEa6TPIBZHJHJSYTabK4PB4HYE2RKBcSAWCls8n5fBAJiQMPYISAFqkRAMMxWgQOkTicGK29vbp5PVh5iOAiIcViImCfTdQvuhhGvIsttCSvhtOgfDLa2w9Eh5dkCBsot6XIDfQRwAb58SUK0uelhGW98UZqTcx+wwIiJpTqczuaCgIMPn882ifdOJtCzBWkfUhnoxZZsO762vr0fm33IMAymrMG/H7DAkhwO5x3dub4yxBhMSxqgBpAOuZnwSsTAhA2lra+s0UoDTaN8cUp45REIykccBmUrpbyz0VoVpsk1NTW8ajcbGnJyc+szMzCqy3lo5pmP8Q1mBt5feqS3eZWHsANYsUpPDxfodhIXaZr4SZJtNbRB5dGa4XK5jKioqruju7i5JSEhoxhpIRErqCDXJycnItVKZm5u7kdpnrZrBFkSF2ypjNMCEhDEiYIiFFFluY2Mj0p/ndXV15ZGimqOsLluEbKQgG6TkkH+hjqy0Kjrn26ysrKr09PTq7OzsmMF9jIkFpUPyUwc3afKQTAWAsFBbrMQW63cMCSHHCgLEm5ubZxA5wfIGS6mdf5fa/DSPx5MJskJtvILa9zYyKKro7zpcLyMjAwG39Tx1mTFcMCFhxASCSIlEpHZ2dmaQYsrp7e3NstvtRfT3HIxVI6aDFBQSf7Xp9fouIhsOUlzbSRl9kp+fXwHvB/3mQu4GTFUkZcWJoCYJ4AVDrE8wGEQcQzrytND7T8F0VljL9O478c7jXU7GyAHPV2ZmZjm2efPmfaxOo8dsNmr7Jq/Xa6EtT1keYRYZIrNJHxxI35Pr6uqSSXfY6RpNycnJW0lvlLvd7koiKPVYgRnT6Dk5HCMWmJBMUWBYBSumYkNyMEyLpX3FWFmWCMgc
6mAKU1NTdUgMhk6GFI3PYrHU0L6tJSUl7yNVNv2NBc46kBSMV0ydOkDHVFlZeTNZxceRfBjgDSsrK3uI9vVSJ9OwdOnSsxHnE+9yMkYPkSswY0VpZXdtYWHht/iirN9kQWA6Vq0mmUgkAlLQ3t4+E9PxHQ7Hd7Kzs62kb0Rtba2gv+GFaSCCUkoEBYG224jAtCImBvqEti46n0ntFAMTkkkMWDKwaJD2msiGmSzY2cgcSUpiGimD2aQIkui3RCIfyaRArGTVIp9BJSmZTWQBv47cBlhdNiMjo44URQcnY2IA6JRMJtOHJDNXIggZ+zBUR51WMDc393UmI1MPICzq7DbSFw3K7mWRxyjJ4bAcQwbpmgzSLYUkN5jKfBIRlOuIhGiJmHQSEenq7e3tJPnC2kBltJWTriqFN1YlRfCusD6afGBCEkdgXQwszEZKvHR3r6XEdBTU19fPwdgvfS/Ky8ubQY26gCwXucosYjqwIBjGeUk5rCYlUoYgUiwKhuRK7OFgDBWzZs36D8na50Ruj1f3IQ9Gfn7+P+NYLMY4RkRyuO2xfscQMZLCkf5CMsMCrMBMOurgxsbGi8rKyqbDW4uZd1gVm8hIFR2HeBUkNixPT0+vICI8Kos7EmHKoXu08zDzngcTkjgA7s3KysoFFRUVf+3q6mo67bTTLtjVOSAcZEVkYnXZ5ubmIiIYOdQYEdMxHyvM0t+51IicGFYhK6ORLJAm+m0FEY5nERmfmppaTpaHh6PiGaMByE9hYeEfidjuh5WNsY+s1pcH62wYjF0ByzNgKy4uXoO/oSfVWXv4JFlLJQIMjwqWf5hFem4uyd6RpaWl2evXr8+m40CKy4j0yI2IDPQeksPVk66sQWzTrsqg6OaboFeJXP+6pKTkm7F+bsYOMCHZw0BOgI0bN15ElsBN8FxQI1lGjcpMBMKLQEHaUuiYFPoth/ZNIxIyraOjAw2vKDk52UcdAVyVPURMusiKqKXG9hE1mgfp2GpqoF1kQbiQOwB5A5h0MMYSREi+qqure486iAuxDhDJ4T/iXSbG5IGyknSf8ZSSkoIFCZtmzJjxFf6GkYa4FeQ1UnIcpdF35FkpaWhomEHn7U8k2Uw61Lp9+3Yr6VFvYmJiGVZgpq0U+pP0byuGpWlrJ93pBPkh4oN0BUeSLn6Nzv0LyfXj9DsPQ+4BMCHZQwDzrqioWFRbW3sLCfsJJPgm7CeiMeOjjz56GLk5iHBYaL+BNiM1CvyO5csryOp8H0FftL8VCcHQgPDJY6iMeALj+NOmTXuAFPdx9P3J3NzcyniXiTF1gCy2WBkZm7KrkraV6u+InXO5XEnI/IyN9GamwWBA9ueSxsbGc4nQZKalpfnou5eIj5f0MlZbRhbbveh0DZ2fU1lZeSftP2T27Nm35efnr1Yz5zLGBjslJMpKqXrMSUeHuqcKNdngDwQMm7dsObWtpeUOeD76/eb321IzM+0Wf+B/Ab+/JjUtvZGsgMbUlOR65HcY7JpkCVjGvuTjA7CSlARcgdGMvIdcY8YIrCJso3XdqQRS8tvMCbbH582b+xTJtjne5ZmIgAcAG4I1x8KrCd0dqctH+/rjGaQvetPT0+uwKbv+p/7m8/th+GW2trXltXd0ZJusCTmki+fUVlZkq8dgSrvT6Txlw4YN+9TU1v11yV6L/7bHH2KSQPV2qUsixMr8G5OQQGi/3FJ26LO9ST9c3+ndu6HXn9cTCCWMfZEnJ4wiKGaFTGJR0CZm0ztID3mEyu48QWG6pcF0xjpN0hlyRydtFehzeS0sFQl6bXe2WVc/26bf+D1L58OHzZ/50e5aKusraxc/1+C97Kte4xFNbn9uhzeYOlrlnXooEuKbnpuE4OWDRoJMs64xw6Rr+o7W8d/vl6Q+UpKXM2pxOG6Px/zamm3nvBPKOHurwzu/zu0vDITElCIlu4Zd2YQoqOkRv4z6tU1jFJu9tpzNLZa7N/y36W6fYNt8JDBoNb4ck662JNG4
5ZRQ40tn7D3nZYvZ3Bt5zABC0tTSkvXHtQ2/e7XXfnFPwMUkZBTgFVqxQZMoNunskpzkhdxiQcgh5oa6RDZ9Tw15Bcv44HD5g7ay7uCssm7frP/pzCec3dPw7M9m2W/NT0uuGe61YMU/8s2ma/7mSr2x00dVL7xc84y4otkdyMa2SVgWvbW299wfdzT+6ZK5mQ/pdNrdGpLdVl45+5eVwQe/7bYd6g32cCbdISBLeISflHGDxiLKNAlirSZZVGustFdLmptVxe7AFwwZqnv9xdi+0CYd9cbXDRf8fk7iz+fmpG1Qj+lHSHw+v+H2tY23v+Ky/SAQCjGLHmVAoN1knEDQsX0kMkVaCDPLWNCHCk8gZH6+2Xdpl6898cH9Ld83m0zuoZ4Lz9+zq7Zf8qeulD94g0FW0IxxhRApgnqvyL+tovfOJO+21jP3mvXiSIdwGlvbsq/f5n7kG7fxMF7/cOhoFiZxn26WaNGYhJO6R665sYE3GDJ+6hDH/HSb+4mnje3fzUoLr7nUR0gwzvh+Zfupb/TazmcysmcAgXdqOK54uAiGhPYDh+a7r5e2nXfuvJynhroK6cbGjsV/7066wRv0MxlhjFv0BkLWP7Yl3HZgu+OL/PSU6uGe7w8E9P+sdv/ft27joWNRvskMeEMYewYg4Ks7PPs9UR649oakpN/p9Tp/X2+IscbnW4OXojHEs5AMxlAAT8lz7eKy0zyeF81m85C8JG/Udp1V0yOmjXHRGIzdRpXLN/2tbXWnX52ect9wz61pbC58qyN4Vohdr4wJgLccmrPOa2h8cnpBXtkOQuJ2mzc4PHvFsVwMxrCwrtOzt9fnMw6VkCxvchwoRNJYF4vBGBWsqGk+gD6GT0iamgsbe5Nyx6BIDMaoo6bHP62hpSunHyHBlLAWTyArngVjMIYDePOGMx29weXNE1NmsjRjosPl9Y1oUkGXqyex22+3j3Z5GIyxAOJJMMyI7/0CGDA2H58ijV+gt0s36USqUSfavQFBpG1I56UZcY5WtHmD8ryxgJYKl2fRCyN9qev1C3cgJCw6jShKMIgef1BU98SeGWui43Gehs7HMb7gyEO3hnK/8YIQy/cA6DWQBZ0w0XskSwUkb5fn0KEiV5G7WjrHsxvyszMk6LUii9qej15cjSJbGfR3plkn7+vwxY73RFtNN2lFtz8k6nt3TyYz6X4ZdD/cv2uQ+403yJTrPFwzAMkGrZQfd3CHPO0Kico5PdQuGnZTlgYDXlQWyZiN5L3JHRBO0qUGaluFVr38rcLlE7GaJdpuLrVds05Lz+MbUtsdDLhfkTUcxFs5yP32BEYlohIdo10f1vV4cbvTwQ0VeFF4gVCOUIjRLwP78TuOc+1GmaAU71iYJo7Otoq/busU99G2K1jp5rcuSBPHZFvEbRvbxTNVzr7fUC5cE7WFhuGOKDfqMUEXfqbo32KhyGoQLxyYTUQyJC5Z1iy2Or3irHybuJXKC0V85peNotE9sBEdmmERf907g4TfLy7+pknU7qKhoQ7NVCg0AJQrsi7PLbCLW+h+NSTEZ33ZIJqGSNgmEtDx4p2iO3JSp7Qn2irq2qYP9ynoWP2h/ncFqQQZRG27SIGNtMnNTTSIx/bNktf63reNYl2nd5fnqHKHMv1gWZPY7tyRqw5yYqayhWS5g/0Um/pMsX6LhQsK7eIns5LFsna3uJTug7Zx9+J0cUJOgni8zCH+sKl9ABlCG/rNvBRxSp5NvFLjFL9ct+v11nRULpRb6gr/jveLdvqnvdLFsdT2Hy3tErdvboc1t8vrTVTg+fHMEDvo07EimtH3tNA9jfSFxLxf/asAKYilL4cLyNKF0+zi0+ZeccXy5l0eD3n94fQkcdn0RPF6XXc/WZL9D5ULsz98Srkjf7PSMxk0A3+LBTzfI/tkiWKbXvxsdav4b1OP2DfVJB6ndgmj8aKvG8XKjoHr/M2xG8Qj+2bK8y/+tkmsjnFMJFAu6DJskOPI97t/qpn0
QKb8fh7db21nfNYVHBVCsjTFLF46KFsqyRvXtopnIzrgsUIKWUGv0D0XJpvER/QCLycB64548YdnWMWT+2dJRfPjFc3ildruEd0HjTOPmGOCQm6GArDNAjon2ThwslJxgkE8R8ocn4+UOsQtG9v6FPN02vc8/TaNPn+yqkW8UL3zekzQh1l0o3sHCQAhxPXcOyFhacYw628dInnIMuvFA0RgZlED+OW6VvFuw44EWD0B6ligKEBUJqmuvmluivi/mcnC4Q2K/f9bLTq8Y28pH5hGbergHKnwbqI6/0dFV99vUCggvJeQoizv9kkFAqtmJIAyyzaHLcChAkQf8t3gHig/p+baxANLM0QP9S4XftMovmzdkffo7AKbuI/kqNrlF+d+3SDKugcvM9oaygXvhE5peAHqqsIdVvhzsLeQT2XDcw21vR6dZRG/m58mZRl1qbYLEH08h0qgJql49wF659kDssVMaud/2+4Qt21qG3NLGZ65vxDpO50Mqa1dXikzkZ5WeHI/OzJfdv4Pl3aK321oH/G9ckmPJRt0Q3aTahQPdJpJJ0lrJNAGnqe62p/a6ectveJ8khuVrNoN4d/2o99AnG9ev3NSbFb6C9WoBzxU8bieRvkeC3bZdvUDjJXBgDKrhvKjZV3i/u2dO+4XDPWVf7D77QnsNiFBh391SVJfh30VfX+lpnvM2XUSvQwoHeDQTIu0+t9tcMm/UfEoh0XRZBCo8YIKUsbL2zySkBxG5c4q0/e5lfcllgplCtfw+42uEV3/38Tk32/sIUWye9ZEJNBgoKRSY9Tjv4jovdMwuvcbT5hjN4qzCuxStpOJyF1anCj+sq1zxB6JoQKuWMVBIn5A9/yA3qkqJ/MSjeIM6twByLjqSRkP+LilR3boGOY8OTehj5CgnMflhCfwfdPmHtHwHuTr52tapaWK70NVxLtCOhkOJSTf25z9vUPwEvxkdYu4nowszyjeb7ziwiK7mGEzyO+n5yeI56u7qE5GbaWGmIBuwXAFMJPa2nmFdvHHLeF17EC8f0R63K7o+bQYBl68AIKKNgmdPYfa48IkY58XA7p9cYpJys9b9SPT46voWgf+N5z3cbT0KvgO3m+mOTxcH4kV7W55v9Ao3m8k2G1CAqv+cOpY4f6HNVFiM4qD0s3i4+YdlhHIw6l5NsmCEVfhJ20O6+oN6jxfow4Nz48xaSjew4hYgOTgZVaT1fdYuUMsbx/oPgIZwdiZfAiq3XMLbeIzYqoQlDPoPmCuKiIJCTwrFxXZSFna5H1gWT5T2SU+ovKikwGHWZhkEjeSVYwygVgVWAZW0xISODRgeIdgO33c1CueouvsamwSHfc7JKSnUYOHcMwnYUZHA/n4DtUbnuUzUuoYH08kCfrprGRxsFIneDY8I6wX5yBuwMPp2JvmpcpOAW5uCBfG1C+eZhffzQt3YpoYtt7lZG3jHaFewZTRIODBKY+wvKEg/rAwneomKOvt6hXN9K4tsq7w/i9b1izrC50POqPv0/sE60cMzYtVTvF6nUv+DvL663kpYj7Vc5KibBro/H+Wd4n/NPWMeWc/VOCdnJKXIDJMWrHR4ZVWDIYLnqNnUb1SOKaE3uOF0xLFIVT34Aaw3iuJeN63rUNsdYYTTX+HfjuflO08Ulz4u43ezwa65l/omFgeFygNFdPp+seQFf9UpVOS7WtJJtR6g4WZoFhWuO5ckqcrSYnvlWySHei31Pk/TO9RJQDwYl5AcnsOkSwrFdYTDAojXSPSQ4LhqaOyrCQziTJeA+/vX2RkvEzbroY+O+lZ/kfv8Bx61oNJntGJtNH5sE5B7nD2B0S2cR3U220L06SVBwXZTDLwRHm4Lfpj3AfDMb8kWTuSyvZcVZd4nI4FR5hN1/3FnBQxg0gFZDfb3L/jgjfwJvp9EbVZyHAnlQdevqepvUYO9SIW6o3v5Mp6e5Z+e7Kii+Q0VRoOzyh/o1go749KEun56H3T9TY4PLIsa6jN4Gr7USeF
9lRMz4f7oc3DBY7hXsjFeATe0ynUZnsDQVFKMou6OjY7gQhJZ98xkB3U/fdJl+RYwvEGaPcYAvkn1U2PomvOJLKMa0FG8Z6h314lPf9G3cDOGfWTZAi/L7xf9BFP0LXQPpZSGSBHfWWM0OOxdMwL1C7fUHQMAP2KYZpF1BZQDniGowHycD61hyPpHcMLgnf5ELUXeGt2BtzhQyIk185Mls+M9g1CgjZ4EPU9qCvo0NJun3yuK6cniZOovHZFvy5HuyxzDErM0TbuXZIh+6MrSc+iX8E1z6X6OI/6OrR5tN1wv7BDho+l9/PDGUnSswi5gxcS7T96yAfHoJ+E+J/1ZT21Eb24Z690WdbLlzfROwvIZzkmG+87URrKkA3o8JeqnVJnoTy4DvrsDKXNod/6N71rtK2R8JrdIiToPM8h4YOSfJc6WYw/37YoTZxNyg6KEAKKY36/II2sTJt8EQgKTSNhxlALdOqb9IBQIA8uzZQKDB0pOl4Iyt4kkNu6vTEJCe6JCsGx6ADQMYIIfdXqFldSJaFiEfiGisxTrE0tlQXxIN+lTgYvEZ06GtgBJEA/Xd0q3m9wiZlEqB7dN1MUWg2iyYOAT9HniVGBcj26T5bIpusiCAk//2hmktgn1STH8nYFjIlvJoFfTHWARgXyBqK2X5pFWmLvN4Q7ZQz9oANLMYa9T8UJJrqHWQaQ/o2ELBbQOKH4GwxhkoNroFHCY4R6Qv0mGgZaGnhuKBkIMdyUKBsCCn+4ov9YK66HRgBFgk4ESgcdAoaPUOd436j/n81OkYq9nQQXHTnGKEEGobjT6XnQ2eF9qDEus+1WsSDRJBtfpJs/nsB7R0wO+MJjpDxATqB4IGuvKUOAePYHl2ZIpYdnwbFQhHhPX9FzwMJEo75/7wx5vUZq6FAyOG8+vVt4tDq8A+U7VbEGEayMzgKK9zVqK9+he8MA6FACpa1EyjMVRb2ACB6GS/D+0QngXV1anETyahbf/7ZRtj3E/NyxKF0q6Ga0RW1/dzTKdgUpz59RB45joOhxXbw/yBbe386ANg6FBPKL94629Q61q72ozaAtNlK5vm4Lz9JGkCG8PRjuQ1uem2iVhApxTWsGGcOGbOIYPDNKDcKE9jqLnrmVyoq2kRJlSaPzQjuCrELmZtL7WUJ10ukLiJerdwzlopWD4OmC4bgS/Mun+8ECTlfqGOQGw5cgn2hL6PzOyLdLS/kqkl10QtBjeH78BplINujJ8DBJY+3srxrGnScR9XhCjlUOTW8ivXTX5nbx3IE54iQi3y9T5wM5QdtGLMUNJBeoQxARxEng/YGgQY97SRf+CTE+uVZpnIKIQgbQNvDEgxESyBXqBMdjCPpMMtaeJfJ9Ockh3ll1j08SWjWQGrpR1TEIeO6I0jEgtdBRD1OfgntDhrtILtJM/bu7aQl6OYQIAon3hNeCThrG5iWkxyt3YVzCo/Y1tfETSIdDj/+ddDLkGAQW+Iz0Op4J9Yv2nq3oV7QD6Fd4XH9C/U4sQA4xRA45xDOj/s+kst21OE3AFsU7SaW2C0IcCdSRSswT9TrZdmH0Hvtpfb/j0M5RR7gW5Bzf4aHCfoNyv+OJkEJv4f64H97z7+abpH5BaAbIFQgSvOfN9O5xT+g+EEnE2oGEDlfSd4uQQAiOJaHFi8QwDQIaMdxwaIZZdm5gZwtIEYAAQOB+s75VvEAK4HYiBVfM2JEPAkFjIBOIJr5qebNklQia2y9t8MVD7Xq8KEFKIShepEbzsznJMggulTpbBAdtIuvzSyInGE4Kd54aqcxRFgjwNauapev4V2QBQfCvJ+FGLMpp9NIxlrqKSAM6RxAXxMdAsGWFacJuxAISZjTW32xok/d8+oAsWd6jMq1k4e18kTEowreJwOGaGG6aTS8UjBiuS3QaaocMtokXj3qFYKKxg9jBK2PSDtIxRLniZtCzoLFAMG5e2yZepQ7wQrhFiQ1HAuOJd5AigsUI6+7X81P7yJAKCNyNa1v6
xZBEA40InSfq/PpVLdJliUb+570yJJtGwJZqZUOh/4AaPojjqwfniLl0P1gq44WQ4F3CyoXli3eKTukI2ncyKVw8F54Drm2QaxDMi6nTp1cr44BQdwAsGQzzgGAgyPJX69pkB/fcAdmD3hfKIFWp9/foPlBe6NBh/YAgWYiEPFXRKcn0dJtWqK/oCnpv6CC+aOkVl5GVg6GIv++TKQk0XOFQ8t8j6xbXh9UE78xBRIIRa6UCHs8f0HVgA96wtlUOg55IbfxBUkzwYL45BBc02tV2UtaQb3hj3mt0ieOojUMmQL7V2TGbunzi5M/rJakH0XjzkFxpqYFcDEZIogGlifpEQPePqL1upmu+RrJ0QPoO3QFvFtoy2hE6jGf2z5a/751slp4fFdA/p33R0BdDotMMuJ30IqLD2ULvGx5CBIQ/RB0fSOf/lSTLelexknTIGV82yPb3V7J2YbDA0Nq8C+t7TwOdEYxI6LY3qBP5nPRmC3XQsxPDQw/wBICYwcMHEnLHpnZp3Z+eF44JUoGO/ER6Vug36C2QlIfo97MLB5+BbJYePo2M33myrEtcPzeF9HiiqOz2i4OpH4HRCIJx45xUaVTinUBXQhZhCP58VavUpeis/6ToGHjoQFpRdpCZH69sEctIJv+xX5YkDyrOJiIJTzq8FSCTIMbogI9GO6P6uGdrx07rDVoMRgJ0ONodiCj6FpAA9Ikg4kFlutOf6Vq/2dgmvPQD+qSb5qZKD3GyYWgRLSBmF9Mzox94pLxT/GlLhyRgT+zfP1MH9BLabAe1MXg5HyIDBToMhK7Fu4NggTxFTtAASYpECvVp8M6DdDxAx91L5QfpfpzqEIYZRjfWO8JyDD2I2MJPqG3fS/0K+lDIAcoxnNg0YLcIyUnEqtHhgX3CIoTnGBYKyIgUEKq0fUgooEDXk4KJxZAhYHBx4aX9lwR/vcMjj98VEOQEBQchQhwDFN9RpBT2pkYPNy6Gemz6sFWDQCZEPM9UGCeEFEoTnS88O1DWM4jEoBMAOUAVQnHWkLLBsElkZD2UPwQO5QU7PDeiseG+M+168ckuFurFO4JlBmIDEnIECTSuCVaM4Zw6JVYA1t/NRJgwTGCO0I7qjJeYiHr/ORadJBUOYurvNIRd7u5A/yEC1AkaCd5ZpFtUzj7QDm+mbI4MHNNKBYNYFtTxfxrDcQV4HpDY8qhgRlgRUPAgJAn68TEzF3V20bTwu4UVdiYpKHQmqN4DqRPHkMiqDrf8jvcOrwCCNXPM+gHXwRAgrGUM9XQOYeooLBaVCDa5gzK4GXL9WyKJIEW1JL8Icj2QCLaB3hE8XhAPdOSQLZAnKEYE4YKcgBhgjBtTYWEFw1MGWcC7cQeDmCbad28MD+H9oQ0+vE9mv3Lh/cVye0cDSuh5elZ0CjA00L73pQ3eTBgPPiVYb780k7htQZq0zFRDDyVJGGJMDPyGsDwBdDjo6FG7cGVHAu/t9wtSpXfLGGFR4j6aYcyODd8vHGMBwrSN5BjPgjYLr9Uc6sBjlR3lwhCETa+XM4XGG9CxoWOGjMLCRqcOIgCL/uKiRElIYPlCdqDfMTQd7eXRKHoccojhTbT5oUAdcoQh+xkZIkd2hD2Qd5DxBb39SFmnHPbEsDjkHO8PZUQHDS/MBxE65sYIHaPKBfodeK0gEZFlxlsCYQnLoVmsOq6wX7lKSI8PhSt8Sm0Nxu8+dA0QdzV+Su5XiCfK/OOZyWSYJfSb6ABPBAjyUGaAykByS7j+31E6evR9kWFNEG2EAkCXw8OvAsHZkMuWYfBgEBGQFPR9qFvc7wt6P9Dd0Cdo2yohUYH2jaFJEBLEBoU9r3uIkMANegxZJ6rbHnEEkcC40uNECjxBIaPi5XTAqLFqoRTXo+gPuO60g3W0UcgiSwrHwnVdRZYNLG9Y5nArwcL+X1OvjMmQ16XyQejBTkOy7FpZYRBkm5zmppHjYx5lKhRKAKE2xCgLiq/Gb0AQt0d0rrj6Fmo8kUJi
jGVmiXDMxDv1PXIa2iXTwwG4eBY1mBVnwWuDl4v572hUUKzzkwYuw4I+fJDbyCEnlBl1i7gEZ/TMBjoPw27wEkGgIHwIktw/bUcGMQy9DJXo+pRjpaLRhRWNVbdjqtlwGXO8AKINIgHAMsCmAm7hQ+WYsTscCa+0gViii8dFnegjpsbvClAe6rh6h88vPiVSgam18ATA5fs8deoYU24n5Yd7ggSF21FIKqUkfXg4Q6fVSPkGnP6QHE6C8kP5odTXxpji65JTjMPHYfaAM2J82kNtBN6NZKVsYYs1tuDBSr2E2uMsKvNtC9PlUAeUlTqdEB6ROxelSwIETwK8JBjeiRU4bdLGvgfamxovgOdBu3ZFkW2c+YeFadKKrSDZhxJdRG2o2LZDYeMdDk0qd9zPrkyPhemgTktF3U2QVCV9QKd4PHWkqGLooEjPNQCvDt6VqkfUdt0RY46TWjdo73hnQxmaglxg2AE6FW7/f9e6JCHB0A30OmZsJiuxKCAIGL7Ed1waOmWHjtH2de49EVOWMUMQ7cnrHdjvqHll0JZAMCN5AWIuIv82DiKDMDAwfLuU9AO8AhhyR3uHJ1GNg0KsIWbEwUAAgYKRu38M7z+8+IZB7qPqEbNOJ70dVNsDjoG3E0Nq6O8Q0wipRptSjdmgco2hAHoGdRmpt9RUAyA4g8Uw7i5GTEgw7ga3NKwtuOcqlGAtuPbgJobVMy/RJF2peEmFCXpibskyoA1DIipQSRsdePk2cUSWRVrp6KzTdzEzBpWjEWH2hyqG5YWGhQDEfyhBUer0JVVxfkuKD0IIJQjG+gkpXMR+WPQIJKXfSKF8SfswrINrQXnBuom02tE5f0nHwqOB7++RpYlANSh+6Xauc8nvakT+cVkJMuirNMYURwTmYbiqQHGXvUfMd43SSWgUtz2eEUFmj5Y5pOs3kpD4guGGhUYKdh4rnwiCUtGwwWpvJSvx76VdMnhPBa6P+6AdwFPxNNUdLCEMC6nAO8aYIJ7vlFybaCEGiUa+vG1gxnYM01Uo9/vVvBQZCIkgN4yXojNCEJdx/EwKiQkUD8F1iVTm5SQzd2/u6At+/B4pl3Npw/j6X7d1iI0kH4dkWmTMwAolqC05wosAkgerAh0mSB+UNKwLKB7/IAobkq+KHNoHPGaoR3hIYC2+rozNqgrfpIyrg7igAzmPyrey0y2V/XFySDUs1xgOBAk5PkcvriOyK4PJzXrFCAhfC+PiUNDIcYDjcV+8f8goAt3w/hIStfJotNETySipo33RCgrHwWt047xUKbPo9BEMp9YjOjZV0YEEo54xph9JSLyKBQjX9iHUSX0TJW+4EoyPK2YkSsWLtgxDJJL4RQ5/wVr+O7UjeCYjCQmSF0L5QnedQvW1gd4pvEZroggbnhAeJ8Q/wdtyUVGiDO7G8AHq8Ku23gmXpwQd/5Gkd1Hq2ze298X3oM7uWpwu48jgwcX+FiIMiE/4OXV6GK7GMIWKUCjsGQkoQcaIN4FOzY+w1GNB1eM4H5ocbn4MPaL/QEcPfQLDQE440IaJQWmEjrmZdAzCBTCUnQQdQ20QkyH8yjRW6Omfk6zjWimm/gYB3iWGHyAjnzf3yPeO0uC5cDxE2qf0LzBI0Och/i8a8AR/b5q3b5gW+hpxgDgvrF/DBkKzxy+Df6eTnO+bZuo7H5wfzwcjGfK3MsY9YAggz8hJuXrxk9nJknDB+NYp+VsAuzJqgH4B/SG8SngWkBjAGwzH9AEYeoaRi+NXxbgfPKwr2j3iZKqfa2Ylyyn3SEWAdoOYnDW7yHkyUoyYkGDMFJYBGvm/61x9bqd12nDAE9x9IBi3b2qX0c+YhYDgSswQUEma2nYR/HI4VdAhmWY5JimrLOqYaKgWqToejcbwk1XNkjzAxYjTWhXXC8Y9EQQMFxrGwn5KL/THVA5UNMoNQnQndToQYozBge2eriQY0yhlUL0nAWU4CAmlDiAl+fd9svqOgTLH8BEUOBQlrnMY1cFp
7Qniz1s7BzwDZlmgoZ+aF+40XlOUv/rcYNkHpptlh3eoEigFoqNadHWK52QpdUKXk1L+GLErUZ09vCuYRncXWaNHEjmCV0u9Tq+SswQN6ioii1BOzx4Yjm3AftwHhA9WAMbafz4nWZILbIj7OO/rhgHPhDLdtalD/HlJuhyHRZQ8roVgRow9I3isMMaspfEEBEwiaBFy+h4pFngKVDHEENupJBuYJgplB/ILRYUYD4xRyzFj5R3gO4jcn6n+QUJwTcQfBJTZXD2B2PKNztqquPYh3zgGnTkCVTHcWKUE28E9jFupihbKDgFsaJtP758tywwXL4ZP4HlDPNSd9A4wdInjnj1Afdc7ZBvBa/dQeUF+YByoQ5KQF+yHVwjEFwppCckdyMAnLT1SDiOBe2M6OMa9QcZAyj6NmHmHuA54UTB9GcOS6jmQOb9CtBEYDyWeadLLGLGLvhkYMP45tTPEGGB4DUGO6HzU66jPBP0EI+i79Dx4d/jdF5EYah21fwQgHk5t9e690uXvb9V1i/9bOXDsFUO5T1D7Bwn5w6KwfsB10PYeL9t5wO94BGQX7v1Weh/o2BuUIHMYcJhpkpdllfIEHY24kd+RUQOLH5vKp8NkgsghEbJ/1ThlnA1iJG6cKwbo+mgkKskt4dlCx4rO7vo1rSLLohNft7rleTAi8T7t1CbsBo3Y4iQdQ/r6z/SuEPtyboSOQfI6yBbk+MHtneJSMgJgCGAYSn3vKil+Vwm2Rn6cuxZnSGKCXzDb61OSaRiaaPunK+0dhHfZtwM7b+g06GoQEpQXMS1tStA5rocYKnhIZtiMMjYSQBFgMOP4Dl+AyHavONVCurXQJj5ocg3I0YN+Ac+Mtoug0mcO2HEdh88v+y4QFvSDiOV5RElyFu67gn25qaBHQDDRr7yaniPb9dlfNvSbqQPAkELiQQyjI0nbywflhMtKz3X/tk7Zl0YHj48GRtwzoDP9AzFqTJOKHAOD4Px2Q5sUcgg3fvo9HYeGPFtxOSMmAhZNuyec3AjCc8WKJml1wuqCQlenGrV5YruGPiRlhqj67d1hKyasFPoHQ0JxIlMqgqzUqZWPUKOCkMHCMSkp1zHco7rv0Pn+bE2LzMyHTkTNToqO/Ssl2BLnfO/bJvlSIRxgmWhQaMBqXdy3zSEZJqzDT5pjB2mizH+kBgTPAToOTHmNBCxMxFaoZcWV0WDX0fG4H26FREKoN3RisBzwHHfSNSHAPoW4wAJd11knvT6wHrEPHiQIb6Pyjo77tE4cRQpZdcd3KZZ9Pf2O4x8hJbyG3jUCLNH9wQIOTw0LR+U7lfsB/yPlfCJdDwFkcKk6qP5RBnWqLAKu7qDGhVgWNR0zZpvA27A+ThkCIwEXMlzF8GK8ERUpjk7y9yTfILlQtrCQMYMFni40ULhHIbvpRFxQxzj3KzrnTGr0mCaK3xHX8z06Bm3FGcPHj2ESeMTQFhBTBcBb8VpUcr93SdEhzkkNkoRsXL2yWZIeyCWUDdrn5y07lOhmIs0nflYnA/fgzkaHimmw8KSpUx0xRg2ijIRh6vRjXBvtRiVZ55PcwWOHYc3BEpxh/01rW6W1CwVWFTFrAfKJdga5mEZWF2QK1mi4nYXLCw/muV81ymD0KiJiUOCoA3guQYyCipKFvkHnAtlUhwYh198q1v5D1DEhxgRxOCgvdBB0xypqn6gjELurqN7wPPA2Yt9K5TcYGKtJJuHBwfv20H/QZ9A/uB/0QynpIHhmVO/IF1R+Pylz1WOJoTVMJYUBt6usyHsakKvblbK2RixxgTp6gOoNEwPUpHDw6ELWQGKEJiQWESFHZ42271Y6VwRCg8TDC436A6mFJ2OwJIx4l9DReF/dSsbhb6MsdsgeAjAx/F2ryBDI7EmfhXVaLB2DDhgd+AdUFsQ3ot1BvjAstFy5PnQ9kg7Cm7C3EuuIdwi5begNXwcGKuoGzzxYMDLu9Y8Kh9Tn0IFvRcVKQs8eT/rwyExr34xNlBf6
GjM58czXK/UGL+G3bR5Z/3/e0tnnuQYw2nDyZ/Uya3i22nbpOhXdYR3gV/qDY6ht5yhGH0IRoKe30Lkhpd6gi+DtgH5DHSAQHKMICOIN64OwToIXCnV8jKIr0A9+QrpdzcyMNoxkdXgmtW6gN/A+4ZUfSS6yEROSFwfJIooiRAav4gExq6ZLWesELkC49FBYsFAwO1jmyC8AdzQqCAF4sKrgaYHiiQUE2Hyxi9kYIEQPRGSjUwHluGknke6oR5CIwYgEAMGFxffpIMegMcKFjm1nQAzK9hhlBEJDKCsISGRGV1jU22NMCUZD/WfF4BYcrO9nKgfPDIuGBhfnF1HPg0ay1TmwfMgz8+QgU0RR5oeinhljqx8MMRBurAHCCUIQC1DgT0bUI2T1KrK+EDsExbwwySyJBNzGG5T3BqWMeKbNdAyCfZemJMiOE8SyJkYHhYa+qyy9QCwZhVzCQ/jhTuoS5PyVmp1nLkYn8PJOjsE7fH4XGZnRBuB9e3+QsqgzEXYGKLrIjmCw62E4J3pIRwXaEVztsdztKgZ7nncHKR9Ik0qcooEOb3nEvUDAkZdhPALG2H+bYr8fkJEvlWeEHod3G3oZ7wOGzV7KzENMfUXHiK4WuSxqqQOEPsFwIGbEwNgaTA+io9wSQ39EAjrtyRi6Cx3tYDpGBcjk6p0YOdD18O5Fe/j6XYN+21VadrSpaJ0WCQyDPrUTGQChi24L8MRHQ/VWDwbU1b92kpUcbSEylisSsXQenmswPQBPU3RW9l3V5a4w5r5zWJuY+js3yRReeyMUVuovljulmwsVBGsTY3QYhzcogUlgWBhe2dI1tpkCGYzdASL/4cE6vcAmrWUQbQzdweoEIVZzZZxMsn2+PhyrA8LxMXUCd27q2CPrPjEYuwvEyCCoHvF1iJGDDd3lDQd0PloeToIF3R3OrWSQHglY+TCEMGS/M4LMYKgYc0ICD8g1q1rkdFKzLhx8B0JSqkyZAyrpOxKKIfgH8WjYX9cbnj3D6poxnoHo/Iu+aZTT+dSFHmtc/n6ueUzxxpCBXOwRsSP+kPQqDbZaLYMx3hCUQwgdctgmQRmuhNcbQ8pqTAZ0/aXLmmVArEkxPjFEgiGQyZ52nzE6GHNCAjGUwxI7WUgLY1M7G5ZgMMYr4O2Ino8fDYwBt3kn3yrIjKkD6HGQ7J3FwOCYWMO3DMZQMb6nO+wBqPPLdzZdDxHniMSG2xGR1BhvjjUGp6bSR8BbyxBX0mUwxhKwZne1/DkCEOfajTI2A2tCIdFXLO8NYr2Qhh5j3TzUxBhPgBo3KbmldnYMgs8RlInwAcR6DRYniIkX8NCzobxnMSUJCSgIIr9/MM0uupXocARkIUAnFsOHKx65CzDVEZH4W7t8sQmJFus9JMlphKNFSNDIsHYGst7uzMvEYKiAfGOmz14pRpl3ADKEAMBnq7piEg3kKkAabRDpH5cky2DNWMeBuIBw/weL441SWRclG6kdJopfrW/baWfCYMQCpnNfWmwXJl04OzRmUcFwjBVEi6xOSH//QlW3nMGJgNvBCAnS4v+vyTWqhOTmuSlircMrCT8jNqYkIUH+CCyyh+l7mMUDPYjsglDBYNFFVoMMRERmveiEZmqaD5AUKGjEvcAC3RqxTDcSLCGPBKYzq3EwYOSYRgxvC6YwYmwVHQVSMtuUTIQIhkTyMUwfxRQ4LMyGmANMRcTcckyvwmwOTH/EfHT8jQyXuAfKjanWSIaF5yjr9sZcRZYxuQG5xIJ6yE2D6Xd1ykrSSJoEOYVHEFPRobgxw0eddq3mTlHlG+1hjpLArUHmPwkfF17wMTy7AvKqTpfPofaCJFiQY8zCQCwN2gamMeIcTPuErCL1ul4SJL+ceYByoL0hqB3tBddErA0WwEPmylL6GzMLLMriX4hDg/yvd3gGzW3BmBpALqr7l2TIGUyYnguZg05VF5zDjKB8ZcV2xLpAz0cvFaBm5c6W2WjDOtil5ORAkj5M98UMovJu
v4yDgSxjvS54HpuUdgGdXkRyDj0NMxS6F/lVMPUWnndMp0f2WOjrBdSmWhU5B+mP7mv8yuKmOBePgXi0RiX1wlTAlCQkiBSHJyRyGqCa0hzJq7CSJHKAnFcQzlCInCTRgBBOI2HC3Pir6Xi4sdEoIESYdQGljERwmG2BXBVI2gRhRmIrLN+NOfIQ0N/MT5VTMHvkkvUamUUVncb/lSTJpZ7VaYcgKWmmoNCT0J5RiEWhTOIzsgKQ1fCRsi65mNldi9JkEih0Eg29sdM7MyY3kEfmnEKb+N36tr41kSDZkAmTsqZGmkkrvXzIpomEgLGAnAzIyphEihWy+LsN7XI/VuZekmySnQEW+kMOGihnJKmCZYpOAHkL/rq9U5xG7QyeSHheQkqCK1i0OPdH8ppt8ppoF3Cfo+NA+ZECoEVZ9RseGST2Qyp/tCdMQYQlvCH2rGzGFAKyjcq1Vki+1JwXagJLDC/+cXG6HIbEGjDIu/NQaeeAa4CIgwBg0gWWg0BOHuRIwv795SrAAXFlTpKcTQRdDM8jpvqjX7hqepL467ZOOZvuTjJwX6FjkGAMuhceGBByLKGC/C7I9gwyBMMAsl7lCi/2+sOovublGqf4BfUV1b0+aeSCqDQOPlt90mFKEhKw123OgU5nkImLiuxSQJCZsIrYKdboUdeXiYRPWTcEQmuh62GRNRAStAvkaEHiOMSUYNE/zBuHsJ/yeb1sQL+bnybHKJGrANdBOn2wcrB1nCOvqdPK5DVoBGhqIB8gOSBCWCPkJ6ubZTAl7gc35N1b2qVl+T+OX5nSwLAhZCfW8B6sLriij/usVs70gWI8g/4udQ10S6PTh6KGxw0keaaypDm8GphpgbwJzx+YI6d5IqkU5P2fZKmibf3r4FzxHLUBJKzC+jGqixpeFyhq3BfEpMRmlJkfEfD7vrJgGDoOkJqbiVBh1sYj+2TJJIogM7AosbzCSBIuMSYfYNDBQxE91Ac9igzRazq80nuClWuRaTRW7iw1CzeyaoMsILcKCInM0kx6GX0BEtvB64i8VxeTMXnpsibpzUAysfNJvyP/iFXJEN4Z4THcR7kmPPIgJEhTiWH/dxTD9dYFA/uaN+u7ZTbalXWeXeawmoyYkoQECnBRsmnAfowxolNXx89hocFVnGIYmCIXa8dcThbmpcubiVxYxcERi9GpgGsOaXdxzUBox+JTUMJw8a1o73/8KbkJcoHAy5c3iZNzbWLflIELMMElD7e1muoXMQJw+VnH4SqijD0PiC7kFrl/ojMXq4vUuZVFJpFJFqmzK3r6kxd46G5fmC49LPDw5S3N7MuAqgItxOkLSIUL7x0syfDiXShDUHpSYl0THkJcs0C5ZjTQVlA+NWi2i+4BC3ejj4MLGf0B+UUcSALpw84I2YR3I4XkEpmB1QXlsEXLJIDzH9g7U5m6rxX3LM4YcAymN2OJJRB9mZVb0b24f65Z128ldgCJP++na17wdYPIMOvFnST30RhOXzOVMCUJCdxvf1mSIb5LBADuPq+yUBEybSLNLzwTSJELpY60vGp6dTVHCtbqAjsPp0wOilhT7CGkSC2PVSOhhJEkCDEkGI+HtakmhYsElDvScoPxq0vCh5Q1ZdSUw7g/0nwvpY4EHQYWU0OOiy7OacEQ4VTST5Q5xFUlSTJOCani4XWDMoa8dslssqbwomXJRplpGNPuoVPBDyBFIC6QxeZOvyTSseQb3pMkIiK4DtTxGfk2uSor9iN7NyzISJIOaxIyjDiRSIM23J52rASO5QrQMSAJF8qL1PUvVneTBTrOV2Rk7HFg/aCLi8Pr6mBCAjp3ePQgXnJhONLvWLsF3jgMiav5ULAEixoPguFGeOJACJIMA406SN0+ZBgihgTZi+EhRDxfR3NAxj1hmQBX1DowIBY+ec2QSI/4CbFPScpCj+gPsFRGdF+DVPJTGf0ICRYNxUhEvAqzpwDBvG51i5zKC9LgV5QuUvs+WeGQ43q3LUyTivOB
0o6+lUox7IJhGYzR/6O8S84Q+P2CNCmASH8M0oJhFQypwBvSSxd+FQvmEWm5e0uH7CS8dFGkMsYQTJFVL12OKrmHRYvxyVvpmnBvb+7yyHvDFX5eoV0uivRspVOuIYTFmuBmRENCWmU0MKw9wAmIBodWKyb9WBZk6dW6btm5w50MhYqHric5Qzr6325oFZeS7LiVzh/DhUlklR1M7QBDL3BrH5BmIjnrkooeJNqvrAECmQZh+eW8VDk0g2NAPHAdBGdjkTwoDwwfglhj7FylESDZkGMMiWLqPK4lg1jp3NWdbnHj3FTZdtDO0A6RphzJuBC/hVlraBfIgcHSvWtoNJqQJqzSJjWLg4z/dFWrOKMgQcYd+RQ9/h+5vIBLTLPpxa9IJlEJiNkDGcfw+XcyzOIp0pmYXQZ+gDYAmYa8QRYBBKYemWWVK02DuCCtOnTx7ze2yTgSGW/iw4KEThlEizagEm0Ml37Y5JL3lsM0yjVfq3OJHxCBQp+BWJEnyx3iiqi+Bu0XfUL3LqbqT1b0ERK9Xu9PM+paWjyBrHgWaE8AcgNFOlhA360b2wfs+3tEnn91zYxfrmsbcNyPVzXHzG2Chck+H7AOjE/8ev2Oa2DmwU0xrokG9p+o1Ms3rm2Nce+Bq5NOZpBV3QvlO9TjsyzGxsqQmDWWZRoPCCprxMRaJwaW44r2/nLSE/D3yWHk+iyxxrDh2oarOVphIrgvGpHr8eAlxWoDABYRi8SbpLjfjFqgDJ7GlWO05Pl4hcWgH1G+dbvV6rQ6tC6XP2gb7TKNNyCg9KHtsSOc/xJjhXXE5KkTBW5QdOjy9oFyBQMSatys1fRNeAAQK/WLGLr32tU72hRms92+aWDfglQR163uf69Yfc3vY+ybzKD+0qvX6aRrqI+QGI1G7yy7cXOLp3fSE5Kxxs6SrDFGD3Psho0GvX7IKTEWpNvWfNsiDh3LMk0FTFXrbU9jUU7ampGcl52W2pDeLpqnAiEZS0CN93BunDFHtllXl5GaLBldHyGxWiw9Z6W0Pb28XXMQdajG+BWPwdg1wKpPt3ueM5lMQzabT8uzvfRml/fcqeAFZExsZJl1DSfOyH5jJOfOKMgtPbqx5u0nXOLa0S4XgzHaONLqea8ot6gS3/sICVzfZ87IfOHj9obj324PnTUVYkkYExMYH/9Osv6j781Nf0Kr1Q7ZXF+an7Hshw3b772r2XQHGT5TO5ydMW5h0Gp8Py8w3laSk7FtJOfD/f2jIvM9K92Bg9Z0evYZ7fIxGKOFmXbD5mvnpN5tMhqlYdkvqNVsNrl/M9t+o25jm//9bsNpvYGQNT7FZDBiw6jVeA5P1n/462L9DTarZWBigZ0AcVKXLCp82LmhKfmZVs2VHb5Q6liVk8EYCdJNuubz00JPnleS+g+dTjfiIOz87KyaP/mbr7xxfddD6/3mpb5gyDCa5WQwdgd6jca/wK5f+/vC0E/z0pJq+vZHH1iUmVZ5f5Lt0vfLm1//R4vmR6va3fv7g4KFeTcQ0mh0GhEKihBPEhgp9Frhm59kWnthiuexM+bkvmg1m0YU8JeYkND16/2n33zo1qr/PtNpvPLjpp5je/whHmsfIUIa/NPqtMHQ1J6vuJuwG7SOI7MsH1yc7Pn7ATOLvhxOsHYs4PzFBVmrXkyynPBCpfPSV1sDF2zq9C4erfJORYS04VEDTZBTYO8OZicaN56aEnjpohlJj6cnJ/aLsI+ZhwTj8qfOLXjl1LnilT1TxMmNp556amVOTs5fjj322GfjXRZGGIfOLvroUCE+inc5Jjq2b98+b+3a1RvPOussNlrGIZITEzuvXpR479VC3Bvvskx0PPvss4/rdDrH+eef//N4l2WyYkomRmMwGAwGgzG+wISEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAY
DEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhITBYDAYDEbcwYSEwWAwGAxG3MGEhMFgMBgMRtzBhGSM4Ha7LYFAQIfver1eSzC7XC4b/jaZTB7a54tvCRmMkSEYDGp7e3ut+O73++VnT0+PLRQKCY1GE7JYLD34jG8pGYzdh9frNfl8PgO+GwwGvU6nM6h6nP72GY1GT3xLOLnAhGQMQIpZs2nTph+Skj4ef9tstum075rVq1efScTEM2PGjNuysrJWxrucDMZIQGQ7YePGjX8nZZ1GhEQq51WrVr2CT7PZXLtkyZIfkeJmws2Y8CgrKzuuvb39h6S/daTHF5D+9pMen0mEO1BYWPh4fn7+G0y+Rw9MSMYAENCEhIRvqqurbyeFnYB9DodjEbaMjIz/kQVZHu8yMhgjhclk6nE6nTWdnZ3nk6LWYl9jY+PxUNKzZs26ickIY7IgOzv7661bt/6d9Hiuus/lck0jHV4zZ86cFUxGRhdMSMYIRUVFa6uqql7q6uq6hP7UYB8Jr58E/N7ExMSOOBePwRgxiHAEFixY8Levv/76e6So89T9JNfrcnJyXohn2RiM0URKSkpLWlra/c3NzX8g8i37SxDv3Nzcv9P+2niXb7KBCckYwWw2u2fOnHnfmjVrzgkEAtKtnZmZ+d/8/PzP4102BmN3kZWVVUPK+snW1tab4c6Gkk5PT3+Utrp4l43BGE2UlJS80NnZeaHH41mIv+12+yYi3i/Gu1yTEUxIxhDFxcXrKisrX21ra7uYrEp3RkbGfQkJCc54l4vBGA1Mnz79aafTeZbb7Z6blJS0urCw8PV4l4nBGG0Q+ahOTU19srGx8R7EBxIR/ycR8sp4l2syggnJGCM3N/dBl8t1nNVqXT9r1qz/xLs8DMZooaCgoLSqquppUtR3Etl+iJR2U7zLxGCMBUpKSl7u6en5PhES44wZM57n2JGxAROSMcbs2bNXtLW1/Z0U9pdarTYY7/IwGKMJItlPGQyGRQsXLnyGlTRjsiIrK6s+KSnpHpPJZE5JSWHiPUaISUiQZ2BzZc28Nzo0565udu5X6/IU9viCCXu6cJMFJluq2Vffc3VwxQqefTACJBi03XkJppp5KdZ1Z6aHnp1fXLQBgZW7c83qxqaid2u7Tv283X90nctT0OH2p41WeacSNDqt1pSQanG/tKoi3mWZqMiyGhoyrcaGI2yBD04syX49JyO9YbSu7fP5DV9uKT3kPy7Tdze2uRbXdLunBYJCN1rXn0rQWxKMIhTU+Det/G28yzIRoddqfPk2U/WcFOuG4629rx88b9bnRqPB2++Y6JOc3d32p1eVXv5Eb8pPa3v9hUJY8CbYl7I7QNepN3Id7gZKPWLup42hY//dqbvgivaK+743N/uxZLtt2LOV/H6//p1VG797T1fy77e7dHMDIZ1OaExSzBkjBGi2JTU13sWYqGgIiQLhEuKjHs1Jr3Y7vnddkef2o0py3ttdj2pza2vmHze23PKWy3JuhzdI78dK1pF1tIo9NYH5kha2zUeKGp+Y/nWzOPzfhoQLT3OUP3/dwuw7clKT6tXf+3WR8Iw8uGz79Q91J9/kDQaMe764DMbO0egO5N7ZoLu9UbTl3LrUcv1wPCUISHt/7ZaTftKU+HRPIMBahTGu4A+F9CtcmgOv3u577h+h6tO+M6vo05EOgzm6nEk3rah/4G237RzS7KNdVAZjt9DpC6Y81WH4Uf12d+Hf5+sutNtscrJHP0LydU3roY/1pFznDQaZjDDGLaC4n2sKXnFYWeP/jp2V9/ZQzytv7ii5uzPpjp6Aj8kIY9yiyxdM+m298a//yu4+Oi3J3jrc80G8X67q+v67HttZY1E+BmM0QExb858G1ynPGt2XX7VXwl9BvvsIidvtMT/eFLqm2x+0x7OQDMZQ0BMIJTzWor32sCLPh1gbaCjn/LvWed42Z2DuWJeNwdhdbHR4F7+9re707+8757HhnlvX3Jr/bEvo8mBI
aMeibAzGaOKFDt2lJzU0vVaYm121g5B43Oa1nZ6l8SwYgzEcrOn07Ovxek1DJSTLGjsPCgm7ZqzLxWCMBr6pbjp4JISkqrGpqKHXlj8WZWIwRhtVLt/0htbOnH6EBMF+Te5A7s5OZDDGExy+YDLc00M9vtrpmSbM7ABkTAx09LhHFCjscLqSu/zWpNEuD4MxFugNhKxuj9eM732EBIrdFwwZ4lesqQm9RiN0WiGo7kWQsziMKfws33sUYIoGbZgvQr5ZvPcMAsGAjodr9ix0mrCs+4Myxi3exZmw4ImocYSFpPi+vTPEiTkJ4s9bOsSD2x0szIxJg7mJRvHMAdnCSF3j979pEqs6hzSyxmBMKICIXDszSVw/J0X8q6Zb3LS2TbgCPLNpJGBCshP8bn6qOKdwcBd/TY9fnPVlg+j2j0z44B1J1GsFjMgkg1ZoYEwyH2HsAeRa9OLhpZlihj220whi+HRFl/jz1o4Re+5AuM20kWgLq54NdkZ8cE6BTfxuweB5DwMk4Cd+Xi9qSZ+PBFDbSQYdfWqEneRcivpupW2cumBCshO0eQKi0hVOrppu0olpCQbpel7v8JKSDonG3oD8HCmcRGSuW9MqMunauI+Px2wYewh+krW6Xr8cLoRCLbEbRTIxh0a3XypmSGKHd/e06tpOrzjjiwZJuEu7OUkxIz5w+oJ9ejyB2MIcknXI9+Yur+gNBEUgFG4PI4WXzr13W4d4ra6b+gQ/YttGqeRTD0xIdoIHSx1yA84rtIt7l6SLFiIpZ35Rj2mnfcdlEKE4Iccq9k4xS4EHsfig0SXeqHOJ47Ks4phsq0gy6uT+NZ0e8WpNt2gjZQ9FfWa+TSxKNoq36Ni3612yUziX7gV3d9iqDIkql188VdnV11EwGLuLZpLjq1c2y+/w1D21f5aU03/XdotbN7YLVbwho/OTjHJYscRmoL81opNk9+Eyh2h2B8SFRXaxkOTXqNWSIg6IDxt7xP+aeuXQY7pJK64qSaLrC/HHLR1SfhfQtU7Ns4k8q17eF23iy9Ze8Topc5efpZsx+niPZBIbsDjZJN46JFf4SD5/tKJJbHHuIMo20rdHZFrE4bQlGnSIqxQrOtziyXKnbAPwtGSa9dQ2QpLgPFvllDINQn9YhkWcnGsTK9vd4vFyh/SWnFsI3W4SKcZwpv5mj18O6azp8LAeHwRMSEYBF09LFL+YkyKU+D0pbGuV8fJT8hL6DfucSUK9hIT0mlXN8rj9U82yI9jm9Ip3GgQpbJP47YI0qcQjcSgJ/GXLm+QwEYOxp4AhxT8tThf7pJr79sEC/BsR9TQi4j8oThTTbTuGfc4psIv/I6IDcg0FfzzJtkmnkQRGRwQEbeViOidSvE/PTxDziYDfvL5tDz4Zg9Ef30k3i0f3zZIBqipckpk7xQEk/1fM6D9x6chMqzhTGbKH1+VU0vUa0upPVGikkfp70uN2Q/+hylNyE8T3v20Sqzs4nioWmJCMIjY5vOLoT+pIEWukGw/4y7ZOcQtZnFDiV5FA/3JeitgvzSxSiTW37cQlXt7tE+d+3SA7hJcOypFM+5B0i3i+2rmnHofBUBDW0LeRHINYpBq1oskdkPEhlyxrEhVkLZqIjT97QLaU7UOIPL/X0LPTKz5P1uX1a1rFtbOSxM9mp4hT823i/u2dWBpgTzwQgzEoINsnfFYnh+wh4/D2vUkE+71Gl6jvDYhjiWQ/tDRDegbzLHqx1ekd9FoY9rxUthG/eGb/bLGAzjkjzyYNVh6hHwgmJKMIyBeEt9MXljR4TL5DJOKcQptkzFadVlqJIM0Y2tkZIQEQqL2diEmp0yf2T9fJ4RwGI16AAsUQS5NCGlIMOnFBoV0cnmWRhARxVqAuaqD2zoBJwGgrn7e4xZUzgtIjiIDARo4GZIwDQPf2BkJygyhjqPHyGYmiOMEgjFoEa2ulN2RXOhk9AZws9b1+sabDLUmMnMCwR55i4oEJyRjiqEyruH1RmhTKamLI
vSIoQqHhr/wd4KnAjLhioPxBof56fqo4q8AmOr1B0eD2Cw+RFdswr4yg8JAQrKAZ4xYYRv/jXulyZloFGYiIH4ROjh5W3xUCrMZ3CSYkYwQ5c8FmEAaNRpS7fNK1XWDRicf2y4p30RiMYWKg5oUHZJYSO/J+g0v8en2buHluirgsapydwZjoyLWEvdMYhv/xyhYi3kHxBOnxQit3n6MNrtExAsjwV21u0UtCPIMU9xdH7VhaYqR5SxiM+GCgaQdr79OWXrEw2STOL7LLjcGYjNjc5ZNT5GfZjeLdw3asrrI7KR8YscGEZIhA4N4r1d0yODV6diKCWV+uccqAp0ggcOmUz+rE6fnhGBKgk87H/HeMKUKev2jtFR2+AFb3lH83eQLiFboWAqp6lDnyHzf3iho6fquTczkwRh9BgViOXtHuDchp6ZHiDavwv029oqzbJ7ZEBe/9aUuHWEfHH5ZpkePqkNUmt198RPKKWBPkf3i9ziVjpjCsg8GZFR0eYdY5xfL28CyDNtr/eq1LJgXs4vwNjDEGZPzV2m4ZvxSdL6SO9Df0OOS2NyLTanWPT5z3VaOcxotcVACGbTD1d5PSJjaQ/n6p2ilnz4Co9JLR+RrdB20CqSKAlR1uYanWiGXtbp72OwiYkAwR37a55RYLiL7GFgtIorbe0T7odR8pc/T7G9N/f7Kqpd8+zD5gMMYKCFZ9OEoOVUDx3ru1I+ZviBnB7ANssQBy/ct1rf32QWm/FDFTDEr9pqhjGIyxAtImXLe6JeZv6x2eAbpXRS0ZhPds7Rz0uu80uOSmooNIzS/WRsl+TbfcGIODCQmDwWAwGIy4gwkJg8FgMBiMuIMJCYPBYDAYjLhjyhMSvSY8pdG/k4hpTHE8MM0iA/jyrXo5rl7lGhhgqlOPc3g4QI8xLmAm4XXvIiVkllkn8i16sZrk++B0i1jZ7pEB1dFA4jMkhlrV4eacCoxxBWhxg3ZHhuzBjtkrxSQnFCCRH9K6YzJBLCyh41rcARk7wthzmLKEBAr4vEKbXPQO8JCGxWJJlTGIBkjL7QvTxFUrmuVaHNudPvFkxcAgQDQIrF/wk9XNMup6NIDZC7+cmyL+UdElqnkdG8YQoC72hQyqyAkpV9slmcXsAmeMKedYp+bsfLv44YomceeidHHRN43UDgYeh2yVP5yeJC5f3tRvccndwSy7QS62d/+2Thkky2AMB2mkv7EWEogyRBIJy95v6BHftg+cgIAs2dfOTBYvVXeLXKtOzLIZBw2ovnpGkvhfU8+oBqFeMT1RzpT8rKV31K452TAlCUmRVS/u2StDPFftFG9vd8gpilhbBl4SKHOrXkMMOrxqr5ozRE2FrS68hA+kfwdhwHmRuUUsdBCu56YWolqaOM8uUwZrRA8dC+WLa2IBMhAeTBVDZ2HR9b8mjlmYZBI5RKDwu9MXzhKIa+E8XMdF+8Pl1krmj+mVWDl1Z9YCY3ICcoCVeb9fnChu3dgmiTH25Zj1fTIXTu2ukbLpVlJj98m3ch3Ia6Iir246TiUgWmW/SQf5DMk2AmDND8guVkjtInnEbsgx2hKugnuD9NsNGnlNTKtEWm6ki19E8g3vC7yKkHk9nWdT0mCq99ApbQWdCuQfx7J0T23A+3fPXuliVYdH3Lm5Q8oOZMSsKGl8YrmOoCKTgE7xiEcmb7fqwqngIVGY8hsWdY2UX+hxr6JjsTtSZ6vtIlr3QmZJzMPXVPQ6rol8VHCco02irJDr6L4G97DI8oTbidpGpwqmJCE5q8AuWj0BudS6inZlXRmsOvqrealy2GUmMehnq7rEl60D2TZWMAUzhyLFejV/L3XInCIQWHhRMPccK0Bi6he+XzzNLodzkKcBnpQ7NnWITLNO3LYwVaxo90hhxLx1LNyEVVSxkN6D2ztlcjUIPFaSrOkxyWMWkwL/Hl0PS2cjG+wdm9rlcvIP7J0htnT5ZAP4oNEltnHekimHZFKg/zczSdy7tbPPSwd1Vk9y
ByV6Hsn+sTlWuZQBFsm7ZUPsKemZJr24iGQMGSoxpPNrZSXeAqtBLhIJuYfHDjIKef7LkgzpCocC/7qtVy6ed26BTRyVZRVrqS3BQ4McJ+cV2kUK3Rd5edRrZtP1Lyyyi9Jun/iwsUeWH0Ojvf6QzNtz37ZOMdtuEDdTu/ya2gPICDyGPibcUxqHZ1qlHD5DOlo1CPHZ7RdSxu4lo7OWZBTHQDe/EsPbAe5yJMkokp4tTjaSru8Vj5Z1ybw4J+cmSEMQwzfw4H1Dsgeyfw7JNbzkxaR7b9nQJknJfST/mDYMQxCZi+dSP1JC19yLrgmPyOPlXZJgYOHJFJNWvFXnkqTj5rn9+xrknLplfpogLiPqqOxfK+kmpoqkT0lCkkkCiqRl0QCZuJKULVYqhXAcmGYWP56ZLJa1DzwW1h4y+M2xC7lCKYgEhB468gkSPgjZFdOTxA+InLxe3y2XZT/rqwbJtG9dkCaFHQIM9vtQaacUZBAPXHO2PZzD4ZjsBElIIIzPVDplQjWU8edzUqQyX9HuFheQIr+kOEn8ZVuHdF8+Vt7al4iHMfVAelgkGnQy3ikaORaduKokSZz2RYNw+ALihjmp4mxSrrHGyUHQQWih2KGQ5xMJhmeipscn8zHA44HVS7EK9V7J4XH5329slwTmuQOyxQfUhpKpHEgUdZ+SRwcKuO+ayWYxj5Q2yAXuD2IDxX4oKey96LfLljcKiPHD+2TKdoj7NVM7e4CHdhgKYNBh8dHuqEyV0KMXFCZKwowVqtOJADxNsvrfpoErUMP5sIX0Krx+VqdGnJprk4QEIYXIrwNifQzpduhY5JT62awUce3qZrGJzrmEdDv2P17ukAvmPUxGKQi0RikEvNil3RpxCl0ThATek/eIrPyLjEro8b8SiYnua74g41eWl/T9p1NwaGdKEhKw6GJlHY5IwI0H7wQWCoOIt3uDUlDTjQMXxEPw309mJYs/b+0QGWY9Mdwd18NwC3Qm2PnCJKNcFRWCrwa6QrHOTjSQBdu/0wB7hlDeQ9fMJmZeZB1YRli5cHPDw4MyNvQGxAk5eulGZDAgZ55AUGaUbI4ipnBfQ8ZBRnBcAxEBBPk1Rh2H0ZKfkmynkNw/X+0UJxAxhhsZij+ojNPDjd1F14H1iUXHkKUYHgt14bH8qHU+ILe4JoaLXiBL9aScBDnEEx38DQ8LSLtL6WQ6iRjBC7ixa3RishiTB9DjuURKMHQT6S2DdwPrz1SR/oUuhkxCyhA3GA3su3/vDDLowkYhPHoq/MHwwo8w8BINGqljMcTS5gkPSYKEg0BbdJp+1ywg2YfHBMS9V5Jsq4gG2iH6laH0NVMJU5KQIKPeXYvSxd6kjGFJQjlD2LzSAvTLQLuPmoVUqiAPEEgwZrBaHAv5m5agl4odMxKmk/IvSehPHqCAZ5MFWNrtlx1DeBxfJzP44bqx3HAIzIICxjVBcAotBnlfKHiTdsdsIFwPxyKlMZT11i4vr4/DkECK9jfqXGS5JUpZbiSFp457uwLh2KU8UsJNJNcg5fBggFxAuqDIIZPwZEB23yULcdMgsgXlCU8MlDKs1L2TTVIxpxkxbq4RVS6/ODhtx/EY4pxtN4q36Jrw9LmUawaV9qRXGhfKC8sShgG8h/gcLBMsY2rjk+ZeOXwIEgHPA2Qb8SAQZAz/YfjdLGWSDEISNxAUxHTgEHxC5OBlgW5dRvoYOjV6siXaBfQ7+gEQcBCHItL96BNmkjxjWDQ6wBvDnUblmhgKUq8JkTcp5CWwk75mKmNKEhIo4d9uaBOXTU8UPcSKIcgJZK09Wu6Q7reb5qbI2QYppBiR2h3BSxhbPD7HKr5o6RXnFNqke+4E+vtPe6XLsXgIFwQPihZDNWDGsB5vXtcqhRizcu5enC5delin5j+NPZLUQEhVgcXY+3HZFrnUdTpds5KUuo9+BIG6hqzL7U6veLbSKf60pV1cQQ0R5YG35M7N7ZIo+RRGzxgEmslfPSCsGAKE
ov71/BS5Xgce2kky+Fh5lxzau4NkGx42dPhv1HXLTh/n7UME/SuS8yMyLXIc/MJpdrE01SzjPWQ7oePhufvFnBRSygbqEHrkOk4YhsG5IPmwIB+jNtQhvTA75BGB3O/SNRGXsi9dMxXXpHaFjgPtA7PTMIz6r5pued27F6URURIyYBFDk0tTzDudms+YesCw4jWrWkiP28VhmWbpwbMQEfhPU4+cUbZkYZqUyQSSyUfKOkUHHY94jhNzreKfFV3i8MywdwOrsUPfg6S0KbGE0OPfzbNJYl5iM8rlE6DT/0i69+qSJEkccO4fNnbIoZhID02pyytna95F+h5o84aHRP9Hcn0p9TkgKVgzB33NjVF9DWQcTsOpal72ERKdThdIMera6KWl7eyEyQCIDgKUvhlkbZrLlzcP2HezEoAHqGsWfB3j/O9+US8FFfeIjI5+mRTty1FBVetImV+6vKnvbyj2C75pEtHAeCI2FRhzjxVoe9qXDTGfZ7KCrJBh+fEzLIbmaiFKxqo84wUQu4dKY69N83a9S26RAEm+ZFlY7iLlKlpegSM/rpUEHCSiD6SMfxJjfZC/RayPg5aAoMJYgYU/jTr3bzHK/jkRJWxTCUa9bmAg0BBgs1i6rTqNiyz3hNEu03jDVjLSbljbFvO3a2OsS/MkEZEnK8LfL/52oMyruG5Ni/SkYCZl5LAivDLYonF6hO6FlzJWH4KA1Y+jzo113FUrB+6bzNBrNH6dTiuZYB8hMRqN3uk2/faV7ZOfkIw1eqfQNK14guR1m0FvGPJUojkpCRtXdoiDxrJMUwH9yAhjzDAnM2XTSM7LTE1pSm3XtvX0+ic9IRlLRMb9McYOmWZdY2piopzu10dIrBZLz4kpna+t7dDs4w+FpuRQDmPigKyXwPFJwddNJuOQrcgT8+z/fqvbew4pmaSxLBuDsbtINGgdx03Pemck507Pzynbr6np89peceFol4vBGG3saw18UZibXYXvfcRDq9UGL56W+OiHTc0nf9utOSQkhGbwSzAY8cV8m2Ht5dOTHsRQ41DPOaQg7eOza7Y/9aTDeg3LN2O8AmT7snzrQwvzUtaM5HyLydR7TXborhXd+oOqe/zFo1w8BmPUAO/IddOtd9isVjmW288TkmS3O+6c23vN9WW+R1a2ew6ITxEZjJ1jts246Q/TQtdkpiQODLjZCcwmk/v6vYtua9nam/t2Q/cZSFw6VmVkMEYCg1bjOy7d+MY1M+13Go2GEc91njutYNPdovlHP90WerLJHcgZzTIyGKOBdJOu+fZCcc2cnLSN6r4BQzPz8zLXvZzUfeyz29suf6FDd2mp0zeHh3AY8QYCn6Yl6Mu+m9D70pULsu5LsSfETjG6C6Ql2VsfWWo977UNznOf7bJcubrTs687ELKMdnkZjOEAQahLUszLL0nqefDEedlv6HW63Vq4Ch7vo6Znv/8fW+s+D1b13vCfTnFKlcs3fbTKy2CMFAVWfeXhNv8H185MvLswPblSo9H0BV3GJBp2m8155WLrfWd1dD7b5XQmsnt79/DO51+/nmFP+Md+ey16I95lmahAigxbgqU7PTW3dTjDNLGA889aNP2FY5zOdzs6AymBYHBqZyPaDdTU1c/c3ND67jFLF86KVCyM4UGn1QbSUuxtdlu2czTrMSczvf53yb4bftzRcberN8hBrruB9z//6g6dRus85jsH3BnvskxkJFjMrtSU5HaTcWD836CeDzDsjLTUFmxjW7zJj+RPP/GkW1ObSqYVlca7LIwwoPSTExM7scW7LBMZIZ/X2NZQJ2YWT9se77IwYgNDPzlZmVMrJ8AYIPmLz7t0OuFgPT524KEYBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIGg8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHExIG
g8FgMBhxBxMSBoPBYDAYcQcTEgaDwWAwGHEHE5IxQigU0mDDd40m/BEMBrXK3yFs8Swfg7E7UGVZlXGWbcZkRKQeJ+CT9fgYggnJGAACvGXLlkvcbvd++DslJaXQbDb/YO3atQfTn4H8/PxHMzIy1sa5mAzGiNDT02MvLS29JRAIJHg8nhTsW7Nmzd+hnA0GQ/ucOXNu0ev13niXk8HYXVRUVBzrcDhOo68a0tkHabVaL+nxv5GOD+Xk5LyRnZ39AZOS0QMTkjEABNTn89WRMP+A2LQJ+5qbm4/Elpqa+k1BQUFzvMvIYIwURDrcLS0tCR0dHT8UYatRlJeX/5DkPlhUVHQ3kxHGZIHJZCqtqqo6y+/3p6v7iKAsIgOzMTMz8wEmI6MLJiRjhOnTp3/V2Nj4fldX16nqPijstLS0R9LT0xviWTYGY3dAhMRH8v0QWYpnRirqhISEUrIYn45n2RiM0UReXl7Z9u3bn25ra/tpKBTSKrtDpMdfpN82x7VwkxBMSMYINpvNSQL71+7u7uNVL0lycvKK4uLiN+JdNgZjd0FyvJ4sx3+1trZeKcJekhDJ93Mk81vjXTYGYzRRVFT0hNPpPN3j8RTjbyLelSTnT7J3ZPTBhGQMMX/+/E9IYf+3paXlJK1W68vIyLg/MTGxI97lYjBGA/n5+X/v6ek5ibYCIuBlM2bM+CcracZkA5HvTdXV1c+RHv81/k5KSnqxoKBgQ7zLNRnBhGSMkZqa+keHw3GAxWLZTgTlpXiXh8EYLRABWU+K+lkiJL/EeHp6enpNvMvEYIwFSkpKnnC73ecEg0HjzJkzH2biPTZgQjLGmDNnztfErF/Ozs7+WKfT+eNdHgZjtAClTIr6yfLy8gPnzp37D1bSjMmKnJyc6tLS0kdsNpuRjMy6eJdnsiImIcG01ea2tsxP67uOXlXXtl9tl6vA5fXZ9nThJgvsGVlpvRs6FvtXfnhFvMsyEZFgNHTnJybULMhKXntUfvJ72Rnpjbvb+XV2dSUvr2094PO6jiPrHK6C9l5P2miVdypBqzfo7RnZmff964tX412WiYpsu7U+y2ZpPDzb/uHSaTnL7Dabc7SuHQgEdBX1DdM/a+o5amNTx6Kqzu7iQDCoG63rTyVYklIS/e1+nW/TR0fGuywTEUadzpuflFA9JyN54+FZ1g+n5+eWkZEeiDxmACGBAH+wbtuJ93Taf7/eIZYIQXrahG3PFXzSwU2bkficMd4FmcDw0FYtxOwOz8Ybsyp+c9ycwrcNer1vuJcB2d5QXrXw1nr9n7/q0BzuC6YYhD6FWOMYlHmqoJs2+4x58S7GhAbV4ePlmuuOa2t646ZZ7t/OzEnfsruX9Pl8hsdXl/3fYx3W62p79YVCZBC7zxiN0k5NBEU4fNvOdThiQGPXC5HbEaq9qnH7n3+wZPojZqPRrf48gJA88cXaq2/rSvmTJ+A179GCMhhDwFand/5VLs2L1wWabv354rzbh3v+N5u2HXRxpelth8+bPAbFYzBGDG8wZHyrQ3P2V2tch/8r4D9qfn72+pFeq7fXbfn1Z5vvebY36aqQ8Gt2fQaDsedQ3+vPv8VtvHfjBseSP89PutJkMsLk7E9I1tU2L/mrK+U3nkCIyQhj3AKK+9Ea788OttV9esCMvC+Gel5dW0f+bc3mPzl8geQxLB6DsVto8wQyflPq+es/krrPTLLbOod7PryAb1c0n/GSJ/nSkAgxGWGMSwRDQvtqTfeFB+ucH527qPgZDMP3ERKv12t8sllzTZs3xP4oxrhHpz+U8kSb/tq9C7zLjEbjkDKDvlvvOmN1V2C/sS4bg7G7+MoROPyjyuZjT19oe3m457Z2OjL+0aL7sTfo54F2xriGPxTSP9Vh+tHhrW0fZmekN/QRkl632/Jtu/vgkJIKmsEYz4CcftXmPtTj9ZqGSkg+res4MhBK4IA+xrgHrMdPyuqOOn3h9GETkvK6hunlLmvJWJSLwRhtbHf55tQ2
deX3IyQIgKrp8RfFs2AMxnDQ4glkqStvDgUVXe4SYUwYyyIxGKOGJmdPzkjOa3d0pXX5zMmjXBwGY0zQ5QsmuXp7pWLuIyQYd/QGQ+ziY0xaePxBM890Ykx2+AMBPVzh8S4HgzFcTBihteg04tfzUsWCJKP489ZO8XlLb1zKQcUQPypJFifnJoj3Glzir9s6d3mOXqMRPyi2i1PzbOLV2m7xz4quMSlbgl4r7lmcLtJNOnHLxjaxweEV+6aaxW0L00S3PyguW9YkHL7ggPNm2gzi9kVpQkP/frG2RVS6Rp6/bf80s7h1QRrdJyAuX9YsuvwD78cYiPmJRnHj3BQMRYlrVrXAaohLOXItetnOSkgmbljbKtZ0enZ5DuTuL3ulixSjluSuXWx0jM1iv4ekW8T/zUwSHd6guHpls2xXV8xIpLaVKJ6tdIqHyxzCFxyYnub70+zinEK7WEvPcvO6thHfH/e7qiRRfG9aomzDj5d3xbwfY+fQkg49NdcmLqb3srzdLe7YHL/VNPYj/Yh25yI99fM1rfC67vKcpSkm8bPZKcIdCIofrWwRnlGUASt1MH8iHZ5N7fA2aktof3vT/f5AOrw3EBI/XN4s2rwDyzgDOpyOMVLlXk/ttrx72BkR+rBPKu6XLusEfUbnHtRFQyIkCCo5u8Auzi+yieVtHhKg9jEu1kDYSekdm50gMsw6SU6igRd509xUsSjZJFZ1uMWft3SInsAOQSmgF3wnvegEvUY8VtYl3iUyMRLoSCkdnmERS0hIvm0bGikyUdkOy7DKzvp/TT39fsui5/nd/DTZEXzQ6BKPkFJV5RsK/lf0TCV2o3ip2ileqN55vqQUg1acRETJSYJk0obrCGRjLnV2ZDGJLJOeiMLAzqI4wSAOy7SKapdPGLS7DiGy0bv4XpFd7EV18CKV6ePmHfUwS7kfGmkGEaOJQEjQ0dxDHSrewYPbO8WncSC7UCjHkHxvd3pFrFcwjd4RyojfouUXh59GZPciUvAgnL9Z3ybqekdGKrPonR2eaRFWesemGO0sFlIVuQNRMEYVHiTi53OgvEPiT9QmV3b0pRyQxgUUH8jXHZvaxRbnzokM2vbhJKcr2sPXgIwvTTHLukEnYaG/YxGEg6kMIOYNQ6wTkLFLiOSY0TlQmRvd4Q4A9aHebx/6fFbrnJCEpMCqJ1nKED3UNn9JBK3BvWcTSGtJYg9MN4uDSI9W9cS+N+T5+2TEQYVfSwS9PuLdQcZ+QTKFdxpL1w8H0FWHUDnWOzxiiOIuzzkqy0oGn0doRjniMsWoEydQW4L+VNvfdJK3+UkmqouQ7C9iEZJp9E6hwxvpXRqGUCbo8AtJh6Mf+1dNt/hvRL9UYjP26zM6Y/QZY4Uhe0jy6YEPSrMIp298NkCLTisFdCmxu4Wk6FDBX7WGFRfe6/lU+cdmW+XfX1CH825DPEu7A+hAiCOJg6iBglS9VtstmhQFOD3BKE7Lt8nO4a4RksC3qePqoHt4iM1v7x4dwUqk8lxA9TmbhDaaYL1Z76IGEyQFERRlrpGz9D0JKBWQK5CCl6rHp9MQRAEdK/p7i1YrPiMZ7lbIXiIRgmtmJosFyUbRTsoKRHCkhGS0sZVIBpQeOvdNXZ4+QoLnOJmsZMj9ynbPkCzTaLhIxmAcgciv6vCMGvkFIbliRpKoIvl9YLuD9oTLBovxD0ScQAZRZucEINuxAF15ABlHTtIL5qH2wnsYc+wGkg2LJNuXETm8c3OH7CABkE90pvAEk+jLjnukhGQi4IPGHuFa0SRIre6StA8VNup0zi+0i3nUV0aPNkC+HbLPCIlto9RnDBWjqn0h3McQc8RDwnJAo422NuHd+A4J2kJSnrDGq11+8SF1aq2kkPYlq2PfNJOwk5R56fy1HV7xRWuv/L4rJNE58CgAOB8WzrI2NwmxEPMSTbIDVZFp6v/Ys0j4jyPr1EovqZEU+TsN4fKogPI8p8AuComUoVGkmwdO1MCzHJSG8psF
2g2YO160fxdFh+X4Pt0PxAMd/DzamtzhOvsOKWt0NnCBb+7ySkv+QLo+FLyFygoX3nJ6xm/JYhysitKIcc+m54Pq/IIImlqXc+xGcUKOVeip3GlG7YCpVbBe0QEm0294l1u7fOJ/zT2yvJGAZVxMCrybHvTxMkff/fDcX9L9VAsSzP67eQkilX6HYn+zziWqFesI9Xswkd25dE/1HaL+P6B6qR0nHSuQTc+AYTfUCaz6t4h81UZZeDlmvTgiyyKtUNT1uk5qA8290qpD25hOdYPvnaRdPqH9W4eoYCB7qvMB7es4ItcY/sOu80ixgIwAkMNk4444X/Q3R9N94V3Am9hIVh3kLfIt5ln08t1A1tA+TTFcNPBQnllgo7ajk0TgvfoeUUEd9q5aJojGl9QOjqbyoo3dTR0LrD+014PTw+mOQK5g9eEeGAqFpwrlxr6dyQBKCetxOskf3gfKA3HD00Mu5yhWHoypSOD54AWCFYjODB3z1ySrq6OGp5JIVn84I1G+K7jOQb5BWnE/uLEre3x97W4/MoTQgeJ6ZU6feI9Ikktp/JDpw8gKL6KyQkeiCcGlDvkZzx4WDBXAe4AyV1LdvkfvInIoEWICPYK6hM5Fu/6oqVd2mulUdyfmhD3aQDPJweskr7GGjGMBdaVK4dmFNvFslVO+X3ioQb5BRoBkum+kVxd/n0D3LUzQS131Mb2zdRFDiDhyL2oLR5E8ouZnJMTuAkukx9Iq20QNtfH3qZNu9+667PDsoD/BcPn7JAPoC3Ad6OxV1B9+RDoUZHBv6uvQ36CM8Ph9Q3p8ME4F+ZlN9Yy7f0X9Ya9y4CxFh0OfpMbQ4WHvj1l6XSBn20kuYaj3Rt3oeKovtJFektdHyx1SR6t9xpd0P3VICm3/FNITqHu867dJftXhfbQ5eL3Q16aawvqnXWm/1YN4wWJh1AgJCvvnvTLEUaSMISB4BDCsl2ucctwWyhlK+tF9MomMmPpcu1BYm6izReGvKkkSJ9MD4xecj0p8aHun+OOWXY8xooOGJwHnQA+gM92PXjxe9LUzk2RHAYsS4905Fp1UeHgvF5Ay//X8VJFG5cd9se+62X5x4deNYiOVCwL50N6ZUqHinFgygxd4717p8hj1uSBor9d1i1+sad1l2dHRQ1hmkhBAIWMIBB3DodTQIQuwAGGNQWFfNztZHJxh6asj1PGv17fKBhsL06jBXTsrWdbLP8oxxi6konhwaYasL1wnlk68kOrlkulJfZ0gzodr72drWvodh8aPrdkdEM9WdsnhH9wPz/8E7ifC47R/o/eebwl3qjJOgpTK9VQ3UMqQHfwOy169H8p0xXSfuPjbJrFtlKyC3QFI9P17Z8jOUn2Gn9Jz/mRVCymesJfoHOqwb56XSuRL3ycrn9O7/azFLRv8bxekyU+1zps9fnHV8mbxVZt7Z7eWSFMUMNzsRrr4pdMTpUKA4r2C3lNY7kNS/jKVY9Hp/57ueTaVS1XaOA4WEMbL0bmg0/nn/tnSAwNPkezQ6TPS4tyf3h/kpcBq6Hv2n8xMEdeRLLy3i6FPHAvZhLKSXlb6hHwX0bVAGHqosb6jXAOKHLFMcCcLpY4uKfaKK6mO0BajgfIelWkVlxFpeIZkD4RBSzsfoPcEgqXqoWhAoWJ4F8RObUdQsHgXH0Z4/NC5XU51CyBmBAQSpPL7ZOz8o8IhOztc4PrZKVJ3JSjtCe9hbUeiuHJFs1TGaG93L06X5VE7DbyHI6h937i2dVxa9z8h2f4xtVHov7BeDImrZnjFZcuaRTkRAzzrdXTMZSSHqh7BU2hFmyQk6HTvWpze157xiGeT0XX58iYyNnbdsUPe8f7haUV7OpdICYbPTs61ikOo3qBfcG10tgk6yEtA6p7H98uiTtEgiUtIeY77tnXKDUW5lN4d2qE67B+t+7D3hzOSxPVzUqQnWKP0E5fTc/7fyhZpGO4Mc+nePyV5KKU6ANEG+YWuQ1/0a+oH4Z36yawUcVhm
fx1+68Y28eQgsYWF1FZwjQAdDTnHs0MOH9k3s58Oj7YjziMj+sqS/jr8DTIEr13V3O+446SxYBUd1Ac/TdcvonZ6Ld0Pcgy5ByGBnnhknyzZh6s6AMdgyA+GEXT3g9RP5lj0/XT4lTN84qJvmsSWXdSbilEjJHiJx2RbRBUxpl9QI0PnA+UMtxA8BbDKflySLJaScgMB+SNZSmDdCN4pI2sBTBIPD4YGpXtpcZL0aiBu5O+ljl3eH0JppppAxw0mDwH+/rREQfIsO3ZYW/+ljuNsKg8UHrwNsHYwFom/7yeB/Q/9fjW9QFhXt5DQXkqN5wjq/I+i54LFfuemdrGZBO2vSzIk+1QBbwxYMEjFrze0UVk0MggJcTfvUqfxeevOOxwQJTDw6bZkOUaOckEZwvpw01uFog03zpCsi9uoHKgvBFYdTwz5eKqjV2u6h/SeoIyvpHcFQX6l2kmKtUt2tr8iUhaJV2td4l2qjzZ67rOlYCfKcdMUQ3/vEMb+UW8Q3u4YLmx0jr8iOYAg435PU+cE4Yeyu2FuipQFdEpQ1ugUfkv1B4v4rkXp0vpFJw+3fDwNyWwzZDlF5NEzPF8ZjuU5hWTkhyQr6NggE3gGPCfq9+36bvHgdkdfBwRFgDgEEEd4VKCU0GkiFgHW5de7ICS4Bkgv8C0dm0r3gIUFpYZYBnT0n7b0SKWATh7WKoD3BjICsgi5hBa5ZUGqlO8VZK3h3V9FsgDvFSwveC8gewhK1ilaBR6vm+g9FVJZQWhfJDk7lNrEzfTcPydyjEDRXblJELhYQXphPhEAeBoxlHoklR2d3Td032plaA/HXENKv6rHR/KvI6KUReUxin1IZ8QiJLEAxQkyAlm6d2un9LDeSbKE+C0VULwYAm2gejHSY962MF0O9R5HbenjCI8u4hZAmvGJ+JhAaOCDgqxBlnsD4dgd1MdtVH9L6L1AtyBAGboGG+KDEAQJz+eNVH/wij5R3jWkwOE9CXgQriPd4ie5/cPGdilzkG3UIYw3BDriGdBJo0Zup2Ng+UNGQcDQVld3eMQlyxpljAgCtv+0OEO+G8QntHp2Lu/gCtB/0CkvU1u7sChReibRh0DXoi5frXVKXQkvA8gFvDi/JDldoAxBIK4EQ/g/IxmFToelj3aAAGiQhEfLHNLAOpPeATptFfA64L3hXUMX4bwb5qTK4X4QsB+uaN5JyYUkrbgH2naeNHzD/Qxk7rOWsIf5EdLhkD8QHRAmGKHwHr5QPTQdDhJ2tUKA/01k4DGSoQNSw+8mEm+QHoKxi77rdHrOH9E5R2ZZ5PmRQF8MIwXliTUMCZ2GQHcYP69Rnf2TSMvRdJ1ryCgBcVvn8EgvI/Qd2gHqDTwAQbaz6N2DA4BwDYV3jwohgQB9R1rtGvmSIRAoHFgyYjfgrYCAgtmiTA+TslaD8jYpigb6D4bR2fl2Kdhwj0MTQ+DgGdhVJDM8JBBKsG8EgELhHUkdKPYlknJ7oapLfNPmEWcVhmMyUOZFJLyobLgTHyYhAWkJbRfSvYqYAnSmB1DZ0QA+phcLJohStCLGIzF8X6htKGg8L8oKBY996HRwDzTiL3dBSPCiEDsCAobOAY0ddZBi0sog4jWd3r57oa/BPSCMEHogyaiV9xsK4CmCJwak74HtnWIrkSh0OpG1q1He6anUUEAYVescljnch66IcSgo60076SwwHDCbrAYot4eojmFhbHF4pZCC+cMqXtMRPh+DZBi3x9AS3PxoyDnS26AhJRc/RoLhEihSPAPqDBYiXO5nUWc/jco4g96DnWQsk8paQ53pnZs65DHRgHcFJAFejAS8SKpjnLOrwDj8rg7DbKP31Uh1gyEYKCUQf7SNx0jBXkIkfl6iRthIFuQwGMkRhuQwBv2BHKYJicV03k/gZaPfIHMwEOC9RMcIbyKsNVzPqhCSogTIS5h8z000ietnG6RCx9uA/CFYPHrYKhrwgr5R101lS5U6AO5wdCbA63Wufm58yApc
9GhzKAGKoXp8dgWNJqxz0BZXkr55hto8ZDVWMCvqE2TNTHWVqQwrYJjBEPEyQCRL6T1XKu8y2gLF/Q5MDw9pLG/3iueJbIOUP07EDUGjBym/qYCrHJZiO9UH2jqC1bOG+Gx7CigtdBY6+VXU0TxFnQ88aZAv6MQDld/CQ9xaaUjBiIweysWFQAjOLzRK3Yy/tRHeu50BXi5493BJ6AKQUnjWQPwgryB2L1E/s39aeEgZ8gijEveDnniqIizLIIcY0sBwCYg72g7aYJcvIP5KZLWV9D3aQ+Szw5sLooxnxlAnhqxg9WuU54k1nBkJ6NMtXT7pEYQxjRAA6HIMldT2BuR10PRhwKPMhcpwIu6ZMMR4HvQR6OjhPUIQ/nrSp0l6zQAdDo2hDoGqBk14eEcnZ0GqQFDzznQ4+mIYOoFg2CAGAcGQ1HmFiVK/w3BergSao5tGGAa8Q9gQhpBD56M9xyL00RgVQgLVYVDcOCrDws17lO+Y2aJaCShTa4woYQTMPntAtlSgKveI9Xo0g+xPpQ4BhAYvaR114BBIsEIMJ6DCnyHLFhWL++PFo0OXFqxGro3SR3jA8MDMzcpYesRwfN9ij9HlwfMB6GAjx6txHZRpKJHYEAiUGdbrWdRpIcAVZX2LWK46zgw2f++SdNlgg5EpdYfRV0Pm0cFjGyw+GZ3dM/Qu0NhVPRNuK8MnBXifGAzDdVR5gNfHG0AcgYg5YwoYStzQngJeL/SQX3EhA/IZaLNrws+gOCWkYo5lZdwyP60vjilI9agbRng+OknVqkGH90a9i6y6ZEn0AZBlWGYgO7gsSDYcuerwIbwFuCfkSfVioTPBddFuUXT/IPWNNqB6euB2DikyEFKup4/iwYM9Fazbi6clyvYB9/MsIh6wqjDkgyuiDuF5hJcC191ZHe2s5tRnVodfYwEdG7yLYXd8SAwcfR86jMqpIBtqHYIEQcNBh+hjPAMOG8/xsEblfUPGVd2DuCHUFWJkjIpMAO3Sc9T/fHgUH90nS3qd1N9UN/9QgIBL6Ge/7ENCcsgPsgfPF4CZffCqY9qtXhPubBHEDV2D+7mUykX7RHvE08BYRBsNFxu6L3ZpbMoUFXgZYWSr8o5n12jELmfiwBMCnQ1CAm9EhzI8BUIC/Qcdfv/SDMXIGlyWd3YblEHqJDG4Dl+QZBJPkw6HLojU4ZGG/VDfh9p3R+o/GC7Q4TpjeIZrLIxEh48KIYHggBkiNgSMDB4KMCfEOgAgCIgRgfWImAYExoC9wnLKJeF10ItSLZv1tP+CrxvlkMgzB2YPuBeOmU+W2ofKMIYKkAK8IAgglNmTZPHBQwKmDbcWWF2ywdJ3DShaWLlQJGCcx9KxCERD2ZKI3CBuAW72zV1h6whekAOVmJRI4PWsIWsMz44x5RvWhd3yaLQIfoNFZI3Q2gj6scaICsdfcCPCElDdywh4fLchPKaNhoQ6giB/RPfBfHSw/weWZg6oIzBvuB5hJUYDZLCJCBqYK8ZT74qRA2BvehY0TARznfFFg0gzacVzJNyqGx9CqXpJ4MXAs+KZYg3ZwE0KD1SJ3SADvh6rcMjxT3he8P7LuiOs1/HDQfqhicqJoSt4Q06nZ4DViLrH2DbqEoFdBmXKKawpDElBacKChnVQT5YRFBQU2t/JokGuDuSLuag4ccC9cD7aRKd3h8UCRWtWY5Mw/EPXe4GU9E9nJ9NxAfHQdodUFmqwGpQ5FCi8URhKRKzC05V6Wb3o8HGlDdQmO8hKKqey411gyvCKGPKC4TM8Y7LRKH6zvpXadrckXBgeQhvC7/CSALDyZpI1uyrGdTDDC+5eeGfOKLDJsmAISJ1SC5mFJQvcTG0IbuEXSeYOy7IOuBYCHuHV6/D1N2xC9MyqpQfLF+8BY+aR0CntCNWJ4YCfrm6VrvhfzE3pOwbDM6hPdErwQMFrB89gdEBmSKljAMNRsN7XdLrFafkJYQ9DuzdmmxjPwHvBM+ETXsEDyVD8pq1XDplA
rtYos5m2dHnoGJusSwzJQL9i9lci6Q3E6EEv4MnP+6pBDre9QO9yYcrAvJuQF9UjocJM14HKhH5Hp4bOHP0F7gXvCNoWOkiv0qODwECfwpOVaSbikmsTy9rD+TvmKCkIMDEA+gzvEG0MMVh/294/FADPjOfDZeH5/SXJIYbL0awQV4F+ql8qCTJA4fGJDNrEr9DZ180OyGFVeGyg5zB0r1FkD2X/jAyIyxASQG3z0X2zBtQLQhDgefg2xnAuSGAD6YB5STpxSbFd/GFjfx2O9r042Sj7PngHTyMdju8w+K2K8QxdocomPLw70+EY8mlU+gzo8L9TP4XUFzlWvdQ/pbuR8yQawyYkqMB1xxf2/Q2mhARKfyvtlJY1mOyXRxdI6wuChml+CO6EIKDDXUjMDXEBIABgqXiBP1vVItaSQIPYQID+d2S+ZICROQ0wdbWaCA1cyFBqiHCHy1kFxpy1ircD5BeNALM+Cuh4jJVDjiAYqG5YLjZZNo9U7AjMunfvDHGrL016BTzBoHweuLUQD3AqkRQ81z/2z5Idcaqxv1n4tMLgj6Bn+vDwPDk9CyWXbr9PaqXXBQQHncFJuZgrnirH2aIB19uyNk/YShRyMbg+dzMaCRolEjyB6KGOo6fsoeOHNwiu9N8tSI05OwENDcFTCHbEeCy8SKp7XMUmJSYCrr53D8uVFjQsdHVaJTokxBwsIQsISeIuKEqUJOX73zQNuF9Nr0+6FRHUdz0p/StKkoSNyg3SCOIKwqa6E+O9ihJIxZ2L08RvF+zonDBj4vIVzbIRIjbg5vmp4v+oA0MHCmXzPD3DdmqQIAAgvpiJcseidBkjANSSnJ5PBBvvDgQVAZHokBOjYnGqpLIMyLpAfMnlRDgrlKECWOGJigsGhBvtBEq52KYXW6n+linuUkwXRxWCQEL5PUNyCS8KLNUPSC4BtBN0OM9Xh4PjIOez7Rni6KwE8dmRZnk+3rVbYfuwPJ+udMoYmt/R8yO2AG0JhiSGKG7f3C7lDsQCcUKIUanrDQ+7RQKXe430wA+orUE5wkD5dwRZgKJHuTLNFhl7hsDAlKh2VkqkBnKGmRuP75cpE+9FA96ij5t6ZXwNPC5ICpgcUdcB2Y68cjjlFFKuh2ZaZYcWCdQ76hUkA20eyvtrkvfrYwSo/4c6LHh/EGPwFB2LegNZAol7iOp2PANDsW8fmtvPlf4WvZNbNrTLWXAwLlHP0AWYOQFSft+2DvkuEYyOIQkQ7VcOzpF1BP37BrUBDP+h819IJBaBl1Abke8S90O7x+feJJv30Xu6ZNkO3WHXh70wIPjoI+DxQNwFOsV36L6Y4YRhfHziqqnKrA/EjeB+5xTZxNGkQ0EK0cnCC4cwAuhltBt4yH5O8oU4xWgPLfoE1MGJpKcf3idTzh5E7YB83rSuTV4Lw7HQgXimR+iYy6itRuZJQT+Da1yCGJuQhu7pkG0EWOvwShk8gOrti6MKBgwB4VzofAyTIiYE8VTRHAGyhWFB6Bkk6cP0eX2E9wblhYMA5AJDwnjHqg5XPRzdig5HvwWdjNhK9HsXx9Dh9XS/+0mH37tXhjSC0IbDw0uY6OCUwzfRbXWkGDIhgTCsIqUa3WeobjEw0CtXNMmsiIuSzVKYviZm/Q8STjWvBoIfL/ymUZxXaJMWPDqAFo9ftJNgQWFfu7JFKmu1g4KlhQ3KC9f7FQkEMjOCtFREsTK8SPV43M1DFX531OycbionYlm6lLwcUOyI3AZZOEPJ97Gs3UckJRwMhIYHRXsFMVmQlv1Sw9P6yruDssGp3gt4dcB24TLH9D+MS6OB4JlgaeE6yCKJKOkDyOIYLOIYQvIYCRoUBQTntVpXP6cBginRQGGlo6wynoXqD9NK0UDwLq6mzhM5FGQuCiojBGVtR5jsoSFgw/AVhB5DCBnm8KyPb9r8YjlZFWDepSTMP17ZLKeTZivWLywgTP1FfeAaiAfpVWYKYAgJgYlw64K0IEgPZYHo49n/
RUqqkzrbi4i44NnwnCCCiCPyKG5hTI9F23QqnhfUL94lGuRYh4/g+ugMo6fDAVAkcMWj822hZ4cnQZIzeh68DwRQq67J3xDJxLgpvGxwW0PhorHikX67vk1acOgIoWxhVWF8+T/K1EDcHwmgECiIOo60xlEseBggexV9AaA+6SWLBMgP6kxOxw2F43GQ0fSy6XaZ0AulXNvZLeOlUL/4+0Nqk5Bd5HUoTjDKd4D3B4UGaxSPhsh7WMQXE5nKl+PpGil3mNqO+3joP8wowRTZsPs8dk4RuNmhDxBkjraxJsKTgvbys9Ut4kczk6RhA5Jc5QrJciKwEECngqh+xH6gDaHzB+HFM1crz4N3+CN65gvpPckEb9QW4cFCW/h3XdiAQaeK8oNEmHRhYon3jNk+XlLKLVQszJ768awkaQWjM9oo32OoTy5rIu6HmWcXUf3BIEEHA6v6aSL96jTiVkU3lXWH69OreFZ66V5DnQY7moC+xjuOTmIHoJOHjsHsn9X0flFH0GeftnhlNtxvFfKL+kY9I44KcRY2fdiQg7VcQ3J9zapmOfMIHSvuUuYKks72iw3SsxLWCbDWEaexPiqoFzoB8oFyqukX8G7eiZjRhTYFvQ2vjYz9E+Hp4yCpaKMwyjCkAx3zSk13n2cDs20gO4hthCdcvntqb5j5hWdHW0a26k9aEuSEgQxTONVDJT0XDGIA7/LnJKsgA9BP0V4F6DPoNwTYepTZiSpeoQ4c90R/k6B4zlvaScZb3FJ34v4/JtnDzLlci05623Ec6gN3UXU4dA/6vPNl3GFYh8ObgrLBI7tN6vAWqeNzFB2O5G+YeAGjFdd4pKxLkjQMLcHzjnuhHcLQhXwERJg8QmZfp76om+oH7Qp1guNA7nbo8PD1If+q4QpjRtVHQ5XyIRESvEoo5OcHmVqqAo32hrWDp2bGdRD8ogbARANC+q/awSONMZ3s54NMo32JXjS2nQEdwnc/r++3Dx05xigHmzYLoIO4fRPIzcDhDQDPhReNGSeDAcJzx06uoSIcDFQX8zcoP5AJbIMBDH9lZCQ4teHTv+yfBQ4NDFNV1emqsbCr36EEQOb+FEX6oKhP/6L//dBA0XG/1xD7ehhGOuer/ufAI4FtTwD1cdUuoueBaKUYDRBdBD6/GkOGMTIVHh6L/f6hiEDY/xOjztHAY1nn0YBH5JkoOYbM/2b94HIJ2cWsl692EniNssHjEe31iARyKawYpF1HXgczpu7YHPt3ePR2ltodnQq8ai9FZCz+G5Grv0XNwoNsYtbc/YMs64COB4T6oZ3M3gMBjyZ8ACxFbJFAp4klJAZbRiJabnA8iGK8gI41Wg9GA0ZgrLqNBDocdUptNDYpBHswoK1gJtqD2wdevyqGDomGS8kyGwnIMmZVfdE6eKblXpmOwSW3wQD5eI7a0XOD9AkgSkgAiW0wwON/cow6xrk7uzYAo/lHUfJxRpQOB6GQweo70dEIa/iwafDf8Y7v2doht0ig/qPvt6s+A20OnuBIYGkFbMPB+ExLyWAwGAwGY0qBCQmDwWAwGIy4gwkJg8FgMBiMuGPKE5LINLeDAYcgfwKCczHbAUGOsZZkxnEFynGjuSQ1gzFShPMH7FwWETQHuUaQLeQXGUxjrbFi1YXzoSAojsWbMZ6gJtHbWTZQHJOlLCGiU2ZxDraoI4LSMbGgKw5Bx1MZU5aQYLrXaXkJYl6SSQoqFtDCFM76GFNlMRvoqf2yZaDRxcV2UerE1NmBwViYBfHkflnip6tb5OyK0QDujZlLWLdEzdnAYOwKyPeDnAGYIiunjHd4ZER8rHVTkJkYKd1/uKJJ5pv53rdNfdlJI4FMlVh2AOuRjNb6K8hUiRw/L9d0j6tkeIyJAehHLO9xUIZZpprANF1MDUZQcjQwk+SORWni5epumSsFMz2RayQWfr8gVeZ7emmIS3IMBUgfgdlJq2Pk6WGEMSUJCabE/nlxupzChrUa4M1AMqrBsvcBam6z
WJkXI6EZ5WQaYPJYqwbp55mQMIYCpL9GPpDbN7VLuYHliORHg0m3VrPDU7gz+R5GctkhA20RayQhp1D8l1BkTCRAN969KF3mefrz1g6Z0A45ohKi0wdHQJVvnWbnmnq09TiAxJoGjYcJyU4wJQkJ0vdCLh9BbhBFSyP5C4D561iQCcmAkP0VScQ2xvB2wMOC3ANYdAxZ/B5Q1hTAdS8rTupbefVWIjzINXBijlWmPgaDhxcG0+mgjH8zL1U0evyiyxdeyfHE3ARpNeKamE6HBG/oLJDMB/PEkRMC7B6rkOK6SFB1z9ZO6VrEgn6Y1gZXIxbbG+qCZIzJAyQdu3Fuini0rKtvKm9Q5jHxSQWOvA/ITQCPIBKx/YVkJxaQbwTrbeB67mBQ3Klk9M1VFs2E7H5N10c+BFwXi29Z9OF1Rd6s65YJw07LtcmcLJjOi8yw37b3ymRUWHsJabTvVq6JNvfb+WlykUIkWoK3BtmKfUo+CkyRBKG6aV6KXLQLHQ+m3rJHZWoDy40g18dF3zZKmQCQxRQb8qL8iuQF8og8KkjOieUVogHZ/W6eTWZ1RXZxpH3/d204qyp0MTL+ymSFJG+YyoxMq0goqeaPQdoDGLJImogcSpBrTPtHpnGs/YN2hHsjYzBIDvK2LKb9SKGBKcDXRfU1ICtYJBDtDM+ELLXIvzNVJH1KEpJim0EmAYuxHpRMgIaU8hBAZCK8ekay+MnqgfPpIUwQ8FSTVxIcpCte72iXk+GRgAlJapCSGsnF3m90iatKksVVK5pEmycos0fCKkTiKpB5WLIgEnDQfKKsmonkT1hRFqQCehfZTpFMCQ0FmQ2Rqe+/Tb3iyhmJ4ntFieLxCodctOwHy5r6kgkxph6Q1Ajj38jeGq3EkNDoxjkpMgcGckiARCAbZ6y1d5AoDHkWkDUTK5IuSgqn/W5XcnwgMdXDSzNl8i8oX2SevXFdq1ws7r69M4h8eKSCR/Kz+0h25eKZJLzha2rFT+maWG8D1i2Gh7AaKIaBsLLviTkJMrEW/r6fZB3LN6DjcPpCkuD7lGRNjKkN6HEsr9HpHSi/GI5H+vWbSCYR/3fP4gyxsn2gZwJ9wDLS1TASQUqgS0FIgG+IcD9CevZcIsiXkQF48/pWIgup0huDFbqvJSMRi4S+QgQEpPqibxr7iAoMRBBxDHMiISQISUiEk6Qh1xb6GvQD0X3N1SubpB5HduRPiYgMZw2gyYApSUhgeUWnfwewVguWvEfmUwgqssEiW16sFSrBzLEkN2I7IILqirgQHihtDAPBY4I1T9Z2hjPpqWsegIjsm2rqW3YdgIIFG8dS7xgDxd9ppoGvB+OkYM/IpOmXGR994tLpFqnkVUwlAWb0RygUzq6YYtCSku3/G7wdWBUamRNl9leSIaSOhuKLBCTpAmWJAqTK1ivLQCAY0K2sgQGyAA8LgmDnESGB9YhkVUg8jTTWxQkDZRceRViYSK4EccU1ewP9O5N8uRprsG94EuQaq3JHevuYjDAA6HHoPchbpC8YnThI8oYur5RTyJKBGO20GDKJFZ4xfI82ADlXs4SjHUGPQ9aQkBPxH8lyRW+dbDe49yaHRxqckUnvIJoyJGCvdPER9SMg0n1LY0RA7Ws+GdDX9C/jVJP1KUlI/ksK8ffETrGUcnOENwEpfcFw0xQBwuJaWLumLcbqxEjFDdcgMryeT0LzHWXlVRUgIBDeVk84nbpeWRUWCh3kBWm0o2VtH2LoWC8C17xICJmqPhoBZRE1dW2TdFN4ASVXDCuXMfWA2V9fknJFSmnER7kjNJq6RDzSqYNUIOAVFlr0Sr9YLh5p15FyG5kgT8pJGHAfrVxfRyfdypipACUcnrkQ9oREB4fDWsX6Mkg3/WFjr/hu7sBrAmh/VmWlbSh9pPeucA2eeZMxdQHDEUMe85NM/bJ/h5R1y2BIapXZNGGZHKjHi4kUYC0cLJg5226Uy3JEI5l0LYgNlsbA2m12g0Y00O2w7AaM
zOglJ7CwIMjLUxVd0gt4coz2M5y+ZiphShKSz1p75UJMdy1OkysRQ9hgmUFZYgFAeCnSzVqxmIQJs2kQk4E4kkMyzHKMD8oa49qn5ieI6+ekyJgPWHoQS3gtMNRyZKZVLmyGdRHA0OGyu2evDOkqB5nA4ktwJUYCad9PonOx8BPYPMYjoZQ/p/JeMT1R/o7U4n/Z1iHjXOCCxMJpd23qEP4pxqQZsYG4it8SEbl1Qaq4g+R7W5dPKmWQYbiBnyB5voesNxBiuJlv2dAmiQkIc9jT4ZHeO6xqjTVBCkkOQaBxXXBekGyQHWmBkoWINS9wzv1LMsUN1BZg4WHGTIunP0GGMkdMy8XTEuV9MX0Y18Q6M2gziJHCWhgIwt3iNIvbF6XJsfU6t1981dYr9koeuFIsY2oDcX9y3ZcZieLoLIuUOazcvpxkF0YdFlf8BclkDhGHF6q6JEnB+jxHZJnFu/U94tCM8ArcTn9AHgcVqi4+h2FBrLmENcOwajRWBsZaLohdwrAN1t5CLMitG9r7LU4IoG25AgFxA/UjGiH6ZqRhmAerJicSa8cCkI/E6GumelxUX4+o1WqD1FE6unzBpHgWaE8A7xxDLVB+GCOH1GDoD+5hEID/W9kiVwP+l1SsYbcdxsexBDwsUKw3g/1Y60IuckcCKRe3I8H7/reNIpmULWQUjaJdYbwYi0RjgV8DViWOV9co6VHYBKaqYZGyyGuGF0EKB8ACcGdj+iYsAgRr4R4ddA8ZTEvlaZ9CDJusnoFztHeCZLO+vZoMmLEqz3gBZPi3pCjTSfEZdWFZgvXV4QvIoGgsqAdvBlbQhjy1kMwgTsotF4prlRYlrDe4orFwIlZ+lQuYkSxiVVZYdbgGvIs4Bw7r/1vVLC1JyGuTXIAxJJ6u2rGOBdoQVt1+N+qabmW18ES9VnpznL7wopiQdyhztDNYoIiJwbDQVFLYeq12WPKtwmIy9Zp0GjdZ8+bRLtN4A+I/sFgeZA+LPsLbB/0KYvF/K8My6VdkEsQXi5yCDGNBPcSfQMdiZW0MZwpF1wJYDBPB11jVFkPorcoCfghQxSq5Rtr/z8qw7kVbwlR4dVE59CO4JjyIaHsPK+sBYTFWxC7iVlios9I1sK8BsKpwrLiuyQpSN0HwD3zvIyRGo9FbYNVXbnR4F8evaHsOaqxHe4yJKFCU0a4zKG5VvdYp7mgpQFEBpPCGxJqei06itqe/foFy7T9kFPuaUNo1UeeGV1DecVwoFF6ReSoh36qr0uv0Q37oGUmW7eucYt+xLNN4AeStIUpGVNRGDaf0yV0UomVO3Rdrf5dvYBKp6JVsIe+xzkUH0hERmBhL3rHPHZg6ZBsoTkssG8l5aclJrSkd2vbGQCB3tMs0HhFL9gDIX7QMgtz2KvK/Q0+HBgTGxuoDABDuhij9HgiJfjoffUu0TANok9FDmbHuM1iytsmKVKOuNcmWIFlbHyGxWiw9h6f1fLC5y7uQ6m3widwMxjgAgs8PSzV8aDIZhzyp/9iClLff2eI+kzpG9v8zxjWMWo336Bm574/k3JKC3O1zWzrWUSc5JQgJY2JjgU27piAnqxrf+wiJTqcLXJ1vuOe/9cGTtrq18+NXPAZj1yiyGiquKTDerdcP3UNyXF7iWydVtb36b5f1grEsG4OxOwDZPi/f/tT+hfYvR3K+3Wp1/iy9/tY1XYZ9O7yBtNEuH4MxWkjQa7t/nq+/NSUxUSYl6hdVmZma0nzHLPc1v6zwP7TN6ZsbnyIyGDtHvkVf/fuCwE8LM9Mqh3OeLSGh+1d75d7cuMWd/02b+5CQGIN0jAzGbkCnEYH9kg1f3DTd8CuzyeTe9Rmxsc/M4mW/Czb94veVoT93eIOpo1lGBmM0kGTQdt6YJ369tCDjW3XfgFk23ynO/uQfutrTHqkV133o1J/S5PHn8BDO7sEogiIz5BFuoRMejVZ4qTo9XKXDAgKf0k265oNMno9/MtN215yctI0juU5Bekr1
Ews6z3xii/PHb7is51X3+Iu9wZBxtMvLYAwHRq3GU5Sgrzg7wf30hXPSnkhPThqYjXEYgMf7vDnZTxWHSrf9rc104wqXOKDNE8gYrfJOBcBaMZHuNoqAMIWCwqQJijph4TxPuwnEjCwy+1f9uND4xwOLMj6L9HIPICQajSY0s6hg2115gWt+UFn9t5qmlsJAIDAwswtjyOh0dhcZNeIX3f6QtSekMdJm6A4Ic2qCpSzBbGxOMBmbkyyWWuFzV7c0NfUY9DqfyWB0Gww6r0Gn9wYD/oErnU0xQMHmpKfVzy2etslkHHrcSDQg3+kpKa3X75d42zmNzU+X1jTN9Hi9HFMyQrR2dReUezQP7pdpO1XsYlVhxuAwm4zuWYUFW/Oz8mrUGQe7C1znwPmzvlzY03vB1qqaOQ0trRxTgqk4Go3J5w8YfH6/EZvX5zflTytOcfsDuY4ed77T7c3tdnsyXW5Pjk0n3FZtyGvRhnwJWk1vVl7+DU5HZ0+8H2MiIysttXF2UcGWRFtCV/Rvg+Yh0et1/gUlxeuxjW3xJj+8Xq+xq6vr3w6HI8PpdKZi6+zszCrMycyn3zJ7e7tyehyNR7rd7uxk0iNGrbHTrNE0ktXUptcF2yuqq9rS0tKqkpKSGu12e3NqamptcnJytcFg8KKDxYb7qJ+MXQMEpzgvpxxbvMsykbF9+/Z5trVrxcmHHPNmvMvCiA2b1dK9dO6sFWLurHgXZcwQCoU00Z/d3d0ZpHezOjo68knf5pL+zSYdm1NQUJBOVl6qV/hSvEFvhtvnTndXbu0wmUzNRRZLnTXZ+rXPF2pqbe1qSUlJaSS922qz2TpI97bT3w2jRRgZAzElE6PtaWBKdXp6ei22XR1LBMVMjSevra2tkBpQFpGXTCIahdQglvj9/tSmpqbUyspKEJkU2h8wm82NFoullrZ6+t5QUVHRQA2ombYWOqediEtzYmJis3E3vAoMBoMRb4BkkD5Mw0Y6MpPIBghHeiAQyMjJycnr6enJI72Y3tvbmwfiQTqvg7Z22tqgJxMSElppfwcdt56IRR30Iow8MvCqmWSMDzAhGWegxuPOzMwswxa53+fzGajh6fEJjwuRE/xtp8aX097eXgxroLGxMYtIzIHU8BKooVpof0J5ebmN9lnpEj20ux7EhYhKHT7p90ar1dpJf3fiFLIA8NlpMBim/BARg8EYewSDQa3L5Uoi/ZVEn4n0mUyEI5n2p5E+yqN9eaTjsohE5MCDTOShl8hDr06nc+v1+m7SgS46vpOMs3oyvtZmZGTAQGsmvQYPso+O8+MT3mR8MvEY32BCMkGgNCofMf3IhT0aaNs2Y8aMTyOPpcab4PF4LNSQE+i7DX9Tw0yiRp5HRKWgo6Mju7a2dg415lxq4LA60Egx3BNqa2sL0f4uIikgLvVkRdQRSWkkAtRAZMlF9+9BGajR47MHQx97sh4YDMb4BkgGkQcr6R8rfVrwHZ+kLzKIYOSQDgLRyMZGeiqboA6zyFhF+l1D+zuJnGCIupZIxjoiHM30ewMRjW7oH+gefCc95BrO1H/G+AYTkkkIaqgubCkpKa07O05xgaYQYUnEJywTsiBSiPikUYOXLtDm5ub9SLFk+3y+DFIOPbBKlM3Z2traTfvbyDKB+7M+KSmpno5pxNARERUfFAUIC33K7xzjwmBMTEBXkFGiVzyzOvU7tf9M0hvZGGYmIpGD77Q/Kzc3106fNmWz034b6ZY2eC/IwKlNS0trIJ3xBekYDKUgPqMDXlra1454DSYZUxNMSKYwQBCgALCRlVK5s2Nh9ShjthkOhyMTGxGXTCI+mUR8ZhBp+U5tbS2CxrIwjmsymVoQJKaO39LxTXRMC5GXRtrgXoUiaoFi2kOPy2AwdgGlnWfRlkkkIxuBoPiknzKzCB6PJ5O2DPVTidFooTYOgwSGSCORjxVEWhpJLzQR0WhTYtoa2ZvK2BWYkDCGBIy9Eolowha5H5YTlJj6iQ2xLmT5
QJHlkELLhfVEWz5ZPXNIKR3U1taW2tjYmEwKLYkITJISlNsAEkMkpZr21dL57YhqR3AuPhHbQt/b4vX8DMZEBTwa3d3dqUQy0hCjgU9s1KYzqD0jRiMXQaCIR6M2mUVtsYuMjU4iGtjaqc12uFyuNjpnJYwJMl7g3WjCkC48odANMG7wqX6P9zMzJiaYkDB2C1A+sSwfIhClGRkZpZH74Ob1er0mUnpm5dNEnxa6Ri4pu1xSltl1dXUF9P1wMsZspCCNpAiNRHJkIG9ra6uBFGGF1WptguuXiApyNtTQdbowlky/dShjzC66v3PP1QKDsWcBkoEgUCLudiVODJud2gq8Frn0G9pTLoJBqd1lpKWlgTggTYD8pN+9REK6qO3UUTtaSW0VMWON9BtIhpuMAw9m5tEnvrvZu8HYE2BCwthjwLgwNsS3RP20KfpYn89nhIJ1Op1JRExspEDtiWEU0v4s2pdZVVV1PBQv7TOS0vQ3NTW5SaH6SFn7amtrvampqeVETDBm3UyWXQ2RlhpS2C4oWgTGIfJeVbx7qAoYjEEBkhFJ2EHCqR2Y6acskHUEg1J7yKUth/alZ2ZmgqzLLRQKGTo7O/GJWIwG2uro9zVERt6n3xuJbHRRu3OCsCNWA985ToMx3sCEhDEuAbKAIZqhDNNgRhGSzbW3t+fAJU0kJTk9PT0tOzt7GinkQrIE9ykrK5tGRAbDRkEiIZ0YHoJbmv52lJeXd5AFWUnEphGu6JSUlFp4XzAFO9INrbql98TzMyYHMJQZY1hTpxCMHCTsUpJ25ep0ugxCKhGRVCIiydhIZpNJThEM2oCp+iTXyDe0gWS+CfFZGM5EQCjJbRuRjjaess+YyGBCwpjwIHLRS1sdKeu6XR2r5DrIbG1tLUTgHoJ0iZTkkFKfQx3Gwc3NzRnV1dUyoyORD4+SeE4mnQOJURLPITAXAXut+ASJMe3GQmiMiQ94NyBPCPZWA7+RjZnkIjM1NTUb8RnIo0FkIyMyGBQyRUSimfY102/ltH2pyJfcSMaa2JPBmCpgQsKYUoC7GltWVla/+BYl8ZwOAbmIdVES0GG4CBZsPixYJJ7TaDQLqRM5GNMYOzo6kHjOTh2QnS6BvAjI2VKPTyIxTUR68L1dmdLowCc2eH/i9PijAnrurI8//vi/1MEii7Cenkc8//zzMtiZnm/Dcccdd+JkGAaDHBB5xXT4FCTuUpJ2YVo8Aj9zkLSrp6cnF4m7SG7S6dldkdPi6fxuqiNMvS8j0vo5vBvwwpH89E2LV6bGy0/2vjGmOpiQMBhiR+K5qN1YcbUfcYHbPTLpE4aLkHguGJRLvCM4N6+9vT2fOqsDaV8WdULIRInETzL5E/2GfA59ieeoE6vHmD+RHySe60Z8DZI+IUh3vCZ9ok61NT8//62ampob1GRWVPZMBEMWFha+MJ7JCEgGEgZi+//27gOwyuruH/j3ruTe7L0TMggkTCmgOBAnDhQt7trSWn21tVv71tbxqnVW679aa1t3nbU4ilpFULQgshEIECBk79ybcTNucvf9n3NIIgEEblCf3PD90Mckz33GSSjcL+ec53fk76P8vRPhM1K0WRbqknU0BiaEpomAkCDI01TRQDlkZ7PZ5LCdXNukSWwV6enpK8XvmVVcQwYN+Xsne+sGCweOxN8/opGKgYQoCPJNaSAsHO7YgbLYsuDcQFlsEXrixEuyfot845OF5yb3F55LiYuLkyWxHWKTb2SOtra2XhFU2vqLzsnic02y8Jx4w5NPQrhlwTkZogY+fp3/wpY9RjKMyXWZ1MKEeXl/F23/vnhTH1xBVj6tMWbMmMUDX/f3OBnkOV9Xuwbu01+ky7TPEgtmscmqoAObDItyfZNEIUIcHymOi5Cb3W6PED+/VvF7KpdSaExJSanNzc1d19ra2iIfN+9fUkH2rHXKZRYYMoi+HgwkRF8TGRAGhmkOd2z/HISBwnNqDoJ440vOz8+XhebkYmEzGxsbM/oL
UqXsU3iuXXxsEedZxb/2ZeG5RvGv+gbxsam/8Nxh59UciYqKitn19fULpkyZ8pB8w05OTq7NyMh4prKy8g6onKbzp6enPy7uq6oDizf5hPLy8p+Jtm47+eST3/oq2iBDh/i5pHd0dMhwkSrr3MjPxfeZLAJb8kDBLvHzSpQTQ+XPR84BkvN/RHubRBsbxDW2yDLkSUlJNtnTIzb50cbHWom0x0BCNALIN0S51Lnc9n9t4EmNgaXVZS+AeENWS6rLiZTiTTlbFp/LysrKEy9Pb2trS25qakoUb84JYosR//JvEG/KsnaLXAukVuyrF+fJuS1tA9U0ZU+ACBO2L2ufzWbLF/e4cd26dZdERUU9cMIJJzwnAsjzra2tl4prTRAhZXl2dvZSOSSyY8eOyxoaGu4WAWlsUVHRfeL0Lw0k8vFuuWKr3OSTUvKjCB1J4h6pok2yWFdG/+qtckKoXNJArd5qMpk6xPfTKo5tlRWAxdelcgVXWS1UbuJ7ahroyRh4QooFu4hGNgYSohFu/zdTGV7EG3O5CAEHFJ6Tb/ADRecGPhf7c+UaIw6HI6WxsTFTBJgTRZiIlK+Jry0+n08Ob4SLIGMSb+Q1/aulykega8Vl60UYsIvXZsimyMXQxLEPrVy58mIRNu5MSkp6UVz7PnG9R61W69jKyspfi/tcKIdCZJtEYJiyefPm2XLISoSGDL1enyrnaMjhKvlRfB0n7iMLbzll0S75saenp09WBpWhQrxWmpGR0SACSot4Xc6zcfWv3DpYQ4aTQYlGBwYSolFi4KkNOaFyv5f27H+sLL4lJ3bKoCCLzskJnomJiZHi3ILe3t4UudXU1MwWgSIvPj5e5ASDrPipzvX7/eEiNJy+bdu2iampqR+lpaW9J8LNzOrq6mvEeWP2vY/NZjtdhImclpYWgxxCkVV2xdcNImQsl6FHDjXJqrr9FXZ7Bwp3cQiF6NjDQEI0TLIXoqS6ccqq0ppT6lo7su09fXFat+krZgcMm5MNEbG5pr6kZN0XL/gCQFO3O2VJfcOVdkNEvcXdMn9GfADxYeIvlX2O6/Hq/es7oxpsHR1tPo9bhAw5v9WeJP6T9A1/L1+LlLhoa0ZCTOOpxTkrJ+Rmln7dE3iJRjMGEqJh2FlRXXzzyuZHlpXZ5vr88tFX+fRrlNbN+lpEtfbglrEeJEcD3V5gW5cea9r12NqpR58fehHNcuRfJa80AJOi/ZiV4Mf0uACijQEEfO7YBz63zgPCsXcbZZrlSFonwtbtcH97fOO/H5hb+Lu89JQqrZtFFIoYSIiC9NG6zWctXNr0YpPDm651W74JssOjpleP5TYdNnfp4PDq4D3I9FCXH9gkQsoWEViiRD6bGuvHzDg/wvUB8ZruwBNGEbfPH7ZoZ9vlK+p75rx/xYTzp43N3qx1m4hCDQMJURDqrW1Zd6zruOdYCSNSjxd4oc4AzxE+oyKHczrFOSvb9FjboYfnGJlyGghA19zlSrtl2Z4/vBwf9d2UxHir1m0iCiUMJERBeHFb88KNTY4ZWrfjmyRzyJGGkf25j5Ewsq+P6xxnvLezft41p8Q/r3VbiEIJAwlREJaUtpzn9Qf454a+lJxT9P7mqvOvOWUyAwlREPgXK1EQdtu6x/cvS0P0pWrbunO0bgNRqGEgIQpCr9sfsfeJGiIi+ioxkBDRV6IwMRL3zC2CNxDAr97dDpuDJTmI6MgxkBDRAS4sTsUVUzNRZuvBo6sq0eU6/AK3MzLjcPGkdOh0wDPravDfqrZvoKVENFowkBDRASamRuPqaVn4rLodf1tbDbgOf86SMit+urgEXn8Aq2sPu8AxEdEQDCREdMQmiaAyPSsOYQY9Gjr7sKKqDQ733mVnwox6uEUYkVN+www6eMTuzBgzZmbHISEiDHqdToWVTfV2bG/pgp9r7xLRPhhIiOiIXDczB7efOQ7ZcRYVLhxuLz6tasd1b2xBQ5cT+fERePTCSSqQfFxuU+Hj9jPGYeGM
bJhNBrXfHwiIIOPEr9/bgUUljVp/S0Q0gjCQENFhTUyJxh8vmAijXo/HV1ViTW2HChtnFybj7rPH47o3tx5wjk6EljizCRYRRl7f2oCXtzTgxlm5mDsuBb+ZMxZLdlnR7T783BQiOjYwkBDRIcmejbnjkhErwsX6ug78fnkZ2vs8MIjA8cxlx2FeUSrCjfpDXqOivRfvlDbD7/djTn4iEiPCkBwZxkBCRIMYSIjokGQgsRj31l7pcnrR59k7Z6RDhBI5LBNtNg6+fjh9Hj+OwWryRHQEGEiISA2rzMyKw+cNdhj0OkxJj1H7W7qd6pHfstYetaZNYVIkpmfGYZM47tzxKYgMM6CksQudLo+23wARhTwGEiLCWQVJ+Mfl01DX5VQ9IuOSI+Hy+vHmtiY4fX4s2W3FJ+WtOK0gEf+8ejpsPW4UpUSpHpPHV1fKlW6JiI4KAwkRobnHheUVNpyQk6B6SHZb9xZEe2N7kwobDo8P17y+GTfNLsAFxalIjQ7H2poOPLGmCot3NKtruEVwaRKBRpwOnzgpIDY516RR7Oty7u1BkSFHHtPqcKvhHiKiAQwkRIQN9XZc/sqmQx5Ta+/DL9/drraD+byxExP/3ydD9t34donaBqyqacfYh5YffYOJaNRhICEiIiLNMZAQERGR5hhIiIiISHMMJERERKQ5BhKiUU5WRF0wKR1vbW+CzeHWujmKrF9yiWjTG9ua0NtfaI2Ijm0MJESjmCzvfv3xY3DjiXnoc/vw4uZ6rZukRIUZ8aMTcvH+LisDCREpDCREo5jsiZAL4P1xZbmqH/L6tib0eX1IiQrHz0/KQ4zZqMLB5gY7Hl9TjcLESPx4Vi4MBp1ab+ZfWxrw36o23H5GIcxGgzjXj1dFqJE1Rn4izpecIlDI9W1kKXm5Ts2VUzPh8vkRG27CwyvKUdfZh5+cmIv0GIu4hl4cuxseXwAmcY97zykSrzvxzPoaZMeacYO4t6xn0tzlwp9WVagKsreJe+vEL1kLRR5X2d6r8U+ViL4ODCREo9hpIiDYHC5VvGyuCCYnZMdhhQgYvzg5T1Vk/dV/duDbE9Jw7rgU9fUD5xXj3dIWvLylHrfMGasqtq6t68CcvERc+MJ6tPW6Rcgx4vWrZ+BfWxvw1o4m3Du3CFeJELKkzIo7zhyHm8U1d7R046UrvoW06HCU2rrxz62NMOp1uPOscep+i0QwkoXRbl+6C63imjHhRjxywUT8/qMyrBP3e/C8Cbh8SgZWVrVjSloMFry0UZWwD4DF1IhGKwYSolFK9kbML07DK5sbUGPvVYXLLpmcjtW1HZgpgslD/62Ab59qqWEGvXrzv+X9Uvi/pIqq3B1pMmBqRgzqu5yYkhGLopRoRIlAIVf0Nen12NLUpcLHgBkZcaonRBZFk70uGbGWA64rVxJOjQrHxgY7etw+rBHHzhfBZUOdfZ97M4wQjWYMJESj1AQRFE7MTUC3x4c5BUniTd+IM8VH2SvhcPkQZxn6x1+We3d6/SpcHIocUmlzuPHAx2UqhAyYlROvhmHCRRDaN+jICbVyMb57xfF3nDFODf0c7JrylAiTUa2PI4eUqjt61dAPER0bGEiIRik5d2R5uQ2/6i/1nmAxYfHC43Hm2GQ8vb4GN5yQC6Nej/PGJ8Mv3vflEMrf11Tj5tkF+KDMqoZ7lpXZDriuXS6o91kV7j57PJbI10WQ+HCPVQ3TrK7uwD1nF2G3rQf5iRHqeNnrcd3MHCyclo2CxEhUtjnQ4/aipKlL7Zc9Nxvr7XhhU50a0pGfnyKC1G0iOPm43g3RMYOBhCgIMWZDl6MPkVq340jIeSPd4o1/gAwS17+1FQ63Dx19bjXZNN4Shs4+r+odkf65tQHr6jvUEEq3OL7T6VHn/XjxNvVRkkMnz4vwIOeWyONkkJFPyvR6/Hh4ZTnyRBBJEteVvTDyXiur21RPipwn8uS6GtR09Kr9t4rAUZwSpRbckz0zf1pViSnpMWoi60fl
paqHxGI04Jf/2aECTCjR63Xs2iEKEgMJURDyE6Mrm+q707Vux5HY3doz5GsZJHbZ9u67eEKa6imR++TKvXd9uFvtl5NTx6dGqTkgMmTISasenx+bGzuHXEsOsWxt6hqyT84PueGEMYgTH5PE9klFKzY12tUTNVv2O1+SE2RXVbcP2Sd7R/Ylnwgq2e8+oSAnMaZW6zYQhRoGEqIgzJuQ9t7axp5ZPn/gwIkQIWRxabPa9vfXddXDvqYMGHcvLzuKVo0Oep3Of87kMUu1bgdRqGEgIQrCD6emPLdkW+15nza5ZmvdFhqZzi1K+uCiydlva90OolDDQEIUhNSkxJb7zi687fol1U/tsvUUad0eGjl0OgQKEiIrHjw167eJ8XFtWreHKNQwkBAF6ZQJeasWRxguvmtF7V0f1TrOau/1JPgDAb3W7SJtGPQ6X7zF1HF+XuT7vztt7APjs9J2a90molDEQEIUJJ1OFxifN2b3i1mZC7fuqZq6taxyqsvtCde6XVpwenxxnfrIq4tTo59tb7U5tW6PFizm8L5p4ws2T8zP3mEymTxat4coVDGQEA2TyWT0zJhQuFFuWrdFK1VVVYUlJSW3nHn8rBeioqI4TEFEw8ZAQkRERJpjICEiIiLNMZAQ0bDp9Xq/TqfzuN1uWSeeQzZENGwMJEQ0bAaDwSc2p8fjOXAJXyKiIDCQEBERkeYYSIiIiEhzDCRENGx6vd4nNrfH4zFr3RYiCm0MJEQ0bHIOiQgkrv5JrUREw8ZAQkRERJpjICEiIiLNMZAQ0bDJOiRi8wjH5Fo+RPTVYSAhoqB4vV5jeXn5XLvdnu1wOLJcLldyVVXVz8S+eQaDwTNp0qTns7KyyrVuJxGFFgYSIgqK7BVpbW0tbmlpeTAQCAz8HXKJ/I/FYilJTEy8X8PmEVGIYiAhoqDIQDJt2rSnP/nkk/9xOp3jB/YbDIa+9PT0B0UocWjZPiIKTQwkRBS0yMjIruzs7PsqKyv/7vP51CO/IoisHjt27BKt20ZEoYmBhIiGJT8//12bzfZ9u91+pk6n86akpDwdExNj17pdRBSaGEiIaFiio6PtmZmZj3d1dZ0qPt+Ym5u7XOs2EVHoYiAhomHLzs7+uL29/T8Wi+WVhISEVq3bQ0Shi4GE6Ch4PB7Trur6orpmW3ZzW0daIBDQad2mb5o+PGK732pN2FzzwbVat+WbFhsV2ZkQG90+MT97R2pSYovW7SEKZQwkRMMgg8eaHeUn3r+i8tZNHZje7fJGO9y+SK3bpQ231g3QTLjR6zIbe5zp5qamH06Of+7aU4qfTYiLbde6XUShiIGEKEiyMNgLH6z6/m2bXfe19LhStW4Pacfl9YfLrdOJ2Fs/s92/zOqb++L8CQvTk+KbtG4bUahhICEK0qY9tdMZRmh/Xn/A+NHutrPuWrL1rkcvm/VLi9ncp3WbiEIJAwlRELodvdG/XdX4IMMIfZnnd/ZcM29X3Xvzjyt8R+u2EIUSBhKiIGwvr55U2uyYoHU7aOTy+Pymf2+o+DYDCVFwGEiIgtBobc3odHpitW4HjWxlzR3jtG4DUahhICEKQndvX7ScxKh1O2hk8/r8/LuVKEj8Q0NERESaYyAhClGyAtup+YkoSIhEuFGv9jm9fjR1OfFxhQ1uX0DtM+h0uGJqBmLNpgOu0eP2YlFJo3x8dch+o16HeUWpyIgxf+n9qzt68eEeG8YnR2FGVhwsJoNqk8cfgK3HhVXV7WjrPbBGiVm0VV47OSoca2rasbWpa/A1vWjr+UUpyIq1YEtjJ9bVdSDSZMRVx2WK78ePd0ub0d7nGTx+RmYcZmbHobLdgY/KW+HzB4L5ERLRCMJAQhSiZGi466zxmJ2XCIN+b4FY+Ybc5/Fhs3gz/837pVhb26Fe+/3cIhQkHli3rd7eh/d2WUUgGRocwgx63DQ7XwSepC+9/zsiHKysbMPCb2XhplPHqvZI/kBABZzK9l48+MkevLy5fsh5s3MT
8fcFU5EUGYbXtjTg2je2oFe0WZKX+PlJeTh7XAr+IM7dUG9HQoQJj86fpNr0qrjWTf/ZMRh0ZHi5W3xvb21rxArRFgYSotDFQEIUsnSqZ0T2Kry0qQ6viDf38UmR+Pkp+Sqk3CPeqBe8tGGw98MtPj68ohwrq9sGr+D0+NHt8h5wZXnOr98rRbwIAzrx6/rjc7BgcgbWiYBzz/IyePx+2HrccPn8KijIMLK2ph33frwHMeFG/EK0QfZcPCaCxIdlNrQ4XHtbLALHj2blqpDhEefOK07FcekxWC2uezjyHrKnRJ7307e3qd4gIho9GEiIQlxA/Cpvc6jhk2VlVvgCATx+0RSMTYxEdqxFvSbJ/XJ4ZJkICIcjj5W9E5IMPOeMS1afy6GYj8R9ZBAZeG2AzeEefK2524U3F84UgSYMU0XgWFa+956TUqJxWkEirCLMrKhoxRUiYHx7UjrW1HUgcASdG0a9HtfMzMFOaw8e+6wymB8TEY1wDCREo0hefATOHJuseiIcbi86nV/MtzCJN/MF4s2/MHnv0I0c3Vi626qGd75K8WaTGkqJMBngFzeR4USSPRzf+1Y24iwm/Ke0Gbcv3YVvT07HxRPT8cz6Wuxu7TnkdXvdPjWH5MKJabjz7PHY0+r4SttNRNpiICEKcbKX4pbTCvHTk/MRZtAhOtyINocbf1ldhcYuJ0yGvRNejeK1K4/LxJXIVF97RViQx7V0O/Ha1TNQlBKt9ru8Pvxs8TYsFm/+wThnfArqb5ur5oHEmPcOyTy9rgbbrXsnraZFh+PyKRlq6OgpEUBq7L0ot/WgIClKhKikwwYS2YHy7IYabGnqxAPnTcDD8yZgR3PXIc8hotDBQEI0CtgcLjUXpFiECjnn464Pd+G5jXXYdxREzgu5c9ku9TSKJId6au196skYOQF1gHya5WDzSg6nR5wjn3Y5LiNWTaR9+fMG3LJkJwbmmZ43LgU58RFqOEiGCTkZNSsuQs2DuXpalggpNYe9h3yCRwatk3ISMG9CKsbEW4JuJxGNTAwkRCFOPtXy3IZaPCu2Fy6fhjMLk/GjWXn4oMw2OH9k4DgZPDY12A+4xg8WbT7qdnxW3Y7vvvY5Hji3GD86MRcXTkhTT9Es3WNTE13lo75yKKmzz4PIMKPY9g4rRYYZMCE1GlPSYlByBD0ePW4ffvHuduQmRmCyOIeIRgcGEqJRoqnbhd99sBPvZ8RiYlo07jxzHK57c+tgL4kculk4PRuzxsQPniMnkj6zvga7bIceLjlSshfm/z7creqjTBJh4bH5kzH9zyswUQSOOQVJKhQteGHD4JM+iRFh+OzGU9QE3LnjkrG9pfuI7lPV0Ys7lu7CC1dMO2h9FSIKPQwkRKOILCb2+KpK3HVOES6dkoGPK1rV48CSnFR6QbFcpPiLhYrlcMpHZdavLJBIskbI7z8swzOXTcX4lCjcI9qiE/eRk1lLGruwuemLSbQdfW7VZllc7ZJJGfj72sMP2wx4d2cLnllXg5vnjP3K2k5E2mEgIQpRXr8fl768Uc3B6OjzqN4HGTD++GkFXtqytxiZHB6Rk0tPe3I1TAbdQa8z8BTMl5HXlfVFHl9dpZ50cfv8Q167R7z25/1ee3tnM9b+qUOFILlf+vOaKlW0TVaH/eJ84CeLt6meHTnJtsvlxcJFm1XVV9l2WehMTsyd+Mgn/W11Drn33cvL8MTaajjEPVw+1iUhCmUMJEQhSg7FyDfr/cmqp1X7TFKV6jv7jupe7b1utR3pazKY1NmP7J6yR2XfEvP7ByQZVGSZ+oORk2+HMwGXiEYeBhIiIiLSHAMJERERaY6BhIiIiDTHQEI0ysk6H5NSY7C9pUtN/hwJ5IJ8k1Kjsa2lW026JSJiICEaxWQhsgUT03H/ucX4nze3qGJpI0G8xYTH50/CRS9uQOuXTJYlomMLAwnRKGYxGvDdaVlqUbpLJ2dgqQgk8ukcs1GP88enYnZ+ouqh+Li8VYQVq1oH
58qpGZiQGqPWtFm0tRGl1m5cMikd0zLj1LGvbK5XT+18f3qOKt2+tbELL26uU0XWMmPMuGZGDhIjw2DtceGlTXXodHpx3fE5qmx8ZZtjsER8mGjDvecUoa7Tqda8kdf+8Um5qliaXAlY1hmJEu25YkqGKonfJ9rz7PpaVRSNiEYfBhKiUUyWY5e9JE+sqcaf50/CtzJisamxE9fOzMGJOfH44RtbML84DZeKwCEDiSz73tLtwm/e24H/nTMWpxckisDQh5+dlIcLX1iP9l6Pqnvy5IKp2GPrwR9XlONRcV1ZE2XZHpva/9c1VfhwTyteumIa1idH4ZPKVrwogkmPx4dnLpmqapHIsCEf55Ur/soeErky8L++Mx2vb2vEU2tr8MTFk1XAkWHoO8dlYsFLG1X9kkDg8N8zEYUmBhKiUcqk1+OKqRl4dUuDWklXlmWXlVpLxMcLRQh5dFUl3L4v3uHDDXqcNTYJ80Xw8PoP/s4vF8aLCjNgdl4C1td14OTcRNj7vJiTnwRrjxuxZqMaFtLvU4MtJ9aiFtKT9VEKEiNR23FgfZIEiwmFSZEqqMgib8vENRZMTkfFii/W4vmyNhHR6MBAQjRK5SdE4LSCJMSYTZiaEYtc8fXZhcn448oKVQHVqB9auVW+3cv3fL3u4BVdB8hjZDho6naq9XNe+LxODc9kRJtVb8x+l8X3pmWj2+nBTe+V4renjYXZaDjwmv33N/TfW7ZNrovDHhGiYwcDCdEoddKYeOy29uDaN7aor9Ojw/HBD2dhmggnS3ZbceXUTJQ0deGEnHgYRACQ1VWX77Hh6uMy8fymOnXcutqOA67b6fLi/Z0t6smdjfV1KkTIku5dTi/aHG4136S81YHMWLM63u7yYFJaNHLiLKokvAxD8l6yuuvU9BhsEW2Q526os+PqaZn4ULThLBGc/ra2ekiZeiIa3RhIiIJgMho9Br3bJ95UD/xn/ghjFeHgoRXlg1/bxNe3L9sFbyCgJpbWdPTiqmlZMIlA4e/vivjtBzuxQASKSyZn9D+OG1CPCv+1f70YSe6/delOXDQxHVeK8OLy+PHqlnq09Lhww1sluEIEndSocHj7h4Oe3VALu9OjFvuTx3xW3a6GZX757g5cLO41NikKb2xrxI2LS3DVcVm4oDhNrZuzsqpNPY3zjDjf6R0ZjysT0deHgYQoCMnxsbbIMJdD/Is+Ruu2HM57u1qGfC2HWeQcDemcwmQUJkepoZQzCxLx++Vlar/s3YgVIUD2YqRFh2NRSZOa+/Hi5/VDrtUjwol82mZfMjxcLoKMUwSWwsRIVLY7sKa2Q53/0n7nS3vaHHh4n8AkPb1+6Gq/Mri82r9acShJjY1sOfxRRLQvBhKiIBTl5ezKjOlu6HJ2j/hAcigb6u2o6uhT8z3eLm0eXNBuWbkNsWaT+lwGDjk35EjJRe7+La4lC7F9Ut6Klh6nCiPHGp0OgTlFWSu0bgdRqGEgIQpCVmpS/TXjLc/f1ua4z+Pzm7Ruz3C193nUtj8ZTPZfbfdIyR6YuqNcVXg0mJIeU3LRxPS3tW4HUahhICEKgl6v9183u/iZVa07T3l3l/XCQACHfiSFjinhRr3r3lMzby/IzqjQui1EoYaBhChI8bGxHa9eMfU797299rYndvb9JBTmk9DXb3pW7Kb7T8u69exJ+R/qdDo+sEwUJAYSomGItFgcv7909v9dWdv82jsbds7fWWct1rpNWnHow8fLj5F+126t26KFKEtYzxlTCj8+c+KY5UkJ8a1at4coVDGQEA2T0Wj0TsnPKpGb1m3R0qJFix6RHy+//OqbtW4LEYUuBhIiOlpmr9fLIQoiOioMJEQ0bCKIGPPy8iy1tbU2rdtCRKGNgYSIjoZ8yshoMBiG96wwEVE/BhIiGrZAIKATm8gjBq/WbSGi0MZAQkTDJodsxBZhNpu7tW4LEYU2BhIiGjbZQwJZLZ11N4joKDGQENGw+f1+vdjChF6t20JEoY2B
hIiGTYQRg8/nCxeBhIvYENFRYSAhomEbGLKRa/xo3RYiCm0MJEQ0bD6fT/aQRISHh3NSKxEdFQYSIhq2/sd+9YJP67YQUWhjICGioMhHfbdt23aGw+GIE5/HuN3uRPH5hJ07d3bIYJKenr47JibGrnU7iSi0MJAQUVDkI759fX2zbTbbzbIomt/vNzmdzrusVqvXbDbvycjIOFvrNhJR6GEgIaKgGAwGnwgdr4kA8iOfz5ck98l5JCKcuFJTU5+Jjo7u0LqNRBR6GEiIKGi5ubk7qqqq/t3W1nYd9q5ng7i4uLVjxox5S+OmEVGIYiAhomFJSUl51uFwzHc6nal6vd6TnZ39UHx8vFXrdhFRaGIgIaJhKSoq2tje3r5YBJIbzGbzG5mZmSu0bhMRhS4GEiIaFjmXJCsr6/nOzs6Li4uLH4yIiHBo3SYiCl0MJERHwdHbG2nv7IpzOl1mj9dj0ro93zSv1+fOLCh60CU+7tpTXqR1e75JsjqtxWzui4qM7ImLjbFzgUGio8NAQjQMshbHR1vKznqypP2GrTbnVFuPO7nH7Y3Sul0a+pPWDfimmQx6T0pUmDU3PqL68qzAoh+cNu0fMdFRXVq3iyhUMZAQBamtvT3x9nc33/vUzt7r/YGAXuv2kDY8Pr+podOZKbfPqnHyP+s+v+rP5xT8fGZB5gat20YUihhIiILg8XhM93yw7Q6GEdrf2hr7rB+8XfaPTxZaTk9JSuDTRkRBYiAhCsKSnXXnvVjWu5BhhA6mtKV7wsOrK//3/vNibjWZjB6t20MUShhIiILwWon1yo4+T7zW7aCR653dXfN/Mq3lidzszGqt20IUShhIiIKwqa5jutZtoJGtqqM3r91uT2AgIQoOAwlREOQERsCgdTNoBJOTXX0+P/9PQhQkBhIiIiLSHAMJEcFs1GNsUiR0e9fJG6Lb5UV1Ry8SI8KQEWNGr8eLirbeIccUinPNRgMauvrQ3utBnNmElKhwhBv3zv2VFcOcHh+au53ocfu+iW+JiEIMAwkRYWxiJDb89FSYTQeONLy/swXz/rEOl03OwF8unqxCxWUvb8Ta2g4MlCb913dmYGpGDG58qwRPrq/BVVMzcd+5xYizmKATGccfCKDN4cbGejueWF2FpXts8PpZ2JSIvsBAQkSDHG4vnttQh46+L55Y3dPaM+SYzFgL/r1wJi54fj02NtgPep0IEWxiLUbYxXUeFwHEYtLj9PwknFeUirMKk/G//9mBx8R+IqIBDCRENKjH5cNjqypQ0d57yONSo8149rLjcNWrm1Bq7f7S49r73Ljzw12qlyQm3IQHzy3G9bPG4FenFuCj8lbsOMS5RHRsYSAhokFhBj2Oz4lHVpxFfe31BVDS3KXmkQyQQy8NXU5MTovG/ecW4ZrXtxz2uoEA0On04Ml11fjOtCwkR4ajKCWKgYSIBjGQENGg+AgTXr3qi1IrMnyc8+wabGroHNwnh3O+99rneGvhTJxflIpbTy+E/sC5sAfVKq7n8vpgsZiQFBH2VTefiEIYAwkRDZI9IY+uqlTBQXKK8FDX6RxyTED8Km9z4Jb3S/HUJVNx06kF6HZ6D3a5A1hMBhgNevj8AXS7j+wcIjo2MJAQ0aBetw8vbKw97BwS6a0dTciKteCPF0xErMWknqQ5FDkc9LOT8xBrNqKxy4mtjV1fVbOJaBRgICGiQbJuyDnjU9Dc7Rrc1+fxYWVV2wHHyvzxnAgvx2fHqXkhBxMZZsSCSemIFh8vmZyOueNS4PEF8My6Guy0cf4IEX2BgYSIBsm6IU9cPGXIvlp7H8548rODHi+HeH67ZCfOLkxGYuQXc0I8Pj+cHj9So8Px5vdmqt4T2ftS2e7AX1dX4/lNtWAZEiLaFwMJEaG+04kfvrEFhoPMTnWIIGHtcePjChu+v2gzekQIcYvAMaCusw9XvrpJVXFdV9eh9r2zqwXVIshEm43qkV85Z6Td4cbnjZ2w9c9PISLaFwMJEcHu
9OCfWxoOeUx3qxdlrY6DvvZxReuQr2WpebkRER0pBhIiIiLSHAMJERERaY6BhIiIiDTHQEI0ypkMOvVEi28EPdYyMHl2JLWJiLTFQEI0ikWFGfDBtSdiTU07blmy87DFy74pvz4lHwGdDg+tKNe6KUQ0QjCQEI1iZ41NhtvrxwnZ8chPiFAl36X06HAcL/bJUu4tPS6sq+1Ar8eHAnHMcZmxMOr1qLP3Yl2dXR07IysOZqNBPTmzsd6OgsRITE2PUY8Ef1bdrp7SMep1mJwWg/HJUfD6Ayht6VYrAU9IicaktGj4RBj6tLINVocbenHslPRYXDk1E5832FW7JqXGoDg1GvY+Dz6talPtGZsYIa4Zqwq27bL1oKSpk/VLiEYpBhKiUcoiAsS8olT8dU2VKlw2rygFj31WhQRLGB4+f6KqvioDwz1nj8ePF29Dl8uLv317Cv6+rkbVGrn/nGJc+MJ6LJiQhkIRMl7Z0oAuETyKxOePz5+Mp9fXYKYIKhcVp+H6f2/FueNS8IPp2XhsdSVOGZOIU3MT8PN3t0MngkhDpxOzcuJx0Txx7FtbVfs6+tyoEgFH3vfEnATcceY4PLqqQnwejzl5Cbh16S5cOyMHbl8A75dZ4RDHMYsQjV4MJESjlAwOuQkR+HCPTc3VuHZmjggRtThvfDL0+r1l3/Wyapn4n+zduE68vr7OjsU7mpEWFa56JeRMj/D+XpS1tR2qyJkMKrts3fi0uh3l7Q7847JpiDeb8KNZY/CUuP6nVe3IiDYjN96izk+LMeNCEWpkGXlZZl5eVyaLOnuf6pmRbbjt9EIs2W3F0jKbqnXy5ndn4qEVFTCLe5e3davjiGh0YyAhGqXmT0hFSmQ4bj1zHJIiwjAlIxan5CaoYNDn8athlTDDF5VZY0WokMHjUPNMZESRi+PlJUTiO9MyB9ezkefI4R/Zs7Ived8nLp6Mq1/7XFzbjY+umyWuMLQarAw50eFG1fsi7yyHmOS+ODP/eiI6lvBPPNEoFG8xYXZeIn76dgnW1tlFkAAePK8YFxan4h+b6tTcDdmDIteciTAZ1byMD8usuOnUArwgXs9PjFC9JvuT80Bkj8s1Myx4+fN6NIsAE2HUo9vtw7paOy4oSsWq6jY1x0SeLns/DGLz+oaGHIfHJ0JNxN5ript/UtGGC0WAenN7E6ZnxqG2o0+VnieiYwcDCVEQxJu0Fz6tW3F4mTFmrKntwJbGLhU6pNe2NuJ738pCSXM3HllZgZ+cmIe2XjcGOkmWV7QiM9aC284oRLfTi4GOkso2B4wG/eC139tlRZzZhN+dXqjWtFlT3Y7XRZD406oKXDdzDB46fyJiw43qvtZeFx78bzlumJULj9+Pd0pb4BL73xDH//ykPDxywUS8u6MZr29rRGSYAfecU6Qmyt783g51LzkxVq6VQ0SjHwMJURAyY80Nne3uWK3bcTjbxRv59mW7huyTC9/JTfae9Hl8eGp9DWbnJqDU2oPGLqcaXpGTTNfX23H55Aw16bXP68ebIjDsS4aQ5zfVqW1f6VFm/LeyVfV+3HXWeLxR0qhCzbMba9W2L3n/336wc8i+v66tPuD7eHpD7QH7RjqLydBnMhk9WreDKNQwkBAFYXp2/KbS9pYJWrfjaMiQEBdhQl5ihHrC5Tfvl6rHdsOjwtWKvXIopaS5C0vLrIO9K0dEB4xLjlJDPX9ZXaUeBz4WiZ9fVUJc7LH5zRMdBQYSoiBcMznp+SXlHee1OtxJWrdluGT4eGt70wH75YTWlzfXD/u6MsTI7Vh3ab7ljYzUlEat20EUahhIiIIwZ2Leil9Wtz5652rr3T5/wKB1e2hkOXFM/Jqbz57yiNFo9B7+aCLaFwMJURD0er3/p6dN/IsBAd+jWzp/2dLjStW6TaQ9s1HvnFsYv+yBOTm/i4mMYDcR0TAwkBAFKTY6qvOWC074w6kFlStf2tr8vf+WNZ3W5/ZZtG4XffN0OgQmZSRsv3xSyqKL
vjX27ZioSIYRomFiICEaBp1OFzhpQsFquWndFiKi0eD/A/U/kmYIW8P8AAAAAElFTkSuQmCC" - } - }, - "cell_type": "markdown", - "id": "c917b085", - "metadata": {}, - "source": [ - "Let us now define the Workflow for Watermark embedding. Here we use the same tasks as the [quickstart](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/101_MNIST.ipynb), and define following additional steps for Watermarking\n", - "- PRE-TRAIN (watermark_retrain): At the start (once), initial model is trained on Watermark dataset for a specified number of epochs \n", - "- RE-TRAIN (watermark_pretrain): Every training round, Aggregated model is retrained on Watermark dataset until a desired acc threshold is reached or max number of retrain rounds are expired\n", - "\n", - "Notice that both the PRE-TRAIN and RE-TRAIN tasks are defined as Aggregator processing tasks\n", - "\n", - "![image.png](attachment:image.png)\n", - "\n", - "
Workflow for Watermarking" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "52c4a752", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "class FederatedFlow_MNIST_Watermarking(FLSpec):\n", - " \"\"\"\n", - " This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning\n", - " Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298)\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " model=None,\n", - " optimizer=None,\n", - " watermark_pretrain_optimizer=None,\n", - " watermark_retrain_optimizer=None,\n", - " round_number=0,\n", - " **kwargs,\n", - " ):\n", - " super().__init__(**kwargs)\n", - "\n", - " if model is not None:\n", - " self.model = model\n", - " self.optimizer = optimizer\n", - " self.watermark_pretrain_optimizer = watermark_pretrain_optimizer\n", - " self.watermark_retrain_optimizer = watermark_retrain_optimizer\n", - " else:\n", - " self.model = Net()\n", - " self.optimizer = optim.SGD(\n", - " self.model.parameters(), lr=learning_rate, momentum=momentum\n", - " )\n", - " self.watermark_pretrain_optimizer = optim.SGD(\n", - " self.model.parameters(),\n", - " lr=watermark_pretrain_learning_rate,\n", - " momentum=watermark_pretrain_momentum,\n", - " weight_decay=watermark_pretrain_weight_decay,\n", - " )\n", - " self.watermark_retrain_optimizer = optim.SGD(\n", - " self.model.parameters(), lr=watermark_retrain_learning_rate\n", - " )\n", - " self.round_number = round_number\n", - " self.watermark_pretraining_completed = False\n", - "\n", - " @aggregator\n", - " def start(self):\n", - " \"\"\"\n", - " This is the start of the Flow.\n", - " \"\"\"\n", - "\n", - " print(f\": Start of flow ... 
\")\n", - " self.collaborators = self.runtime.collaborators\n", - "\n", - " # Randomly select a fraction of actual collaborator every round\n", - " fraction = 0.5\n", - " if int(fraction * len(self.collaborators)) < 1:\n", - " raise Exception(\n", - " f\"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training\"\n", - " )\n", - " self.subset_collaborators = random.sample(\n", - " self.collaborators, int(fraction * (len(self.collaborators)))\n", - " )\n", - "\n", - " self.next(self.watermark_pretrain)\n", - "\n", - " @aggregator\n", - " def watermark_pretrain(self):\n", - " \"\"\"\n", - " Pre-Train the Model before starting Federated Learning.\n", - " \"\"\"\n", - " if not self.watermark_pretraining_completed:\n", - "\n", - " print(\": Performing Watermark Pre-training\")\n", - "\n", - " for i in range(self.pretrain_epochs):\n", - "\n", - " watermark_pretrain_loss = train_model(\n", - " self.model,\n", - " self.watermark_pretrain_optimizer,\n", - " self.watermark_data_loader,\n", - " \":\",\n", - " i,\n", - " log=False,\n", - " )\n", - " watermark_pretrain_validation_score = inference(\n", - " self.model, self.watermark_data_loader\n", - " )\n", - "\n", - " print(\n", - " \": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}\".format(\n", - " i,\n", - " watermark_pretrain_loss,\n", - " watermark_pretrain_validation_score,\n", - " )\n", - " )\n", - "\n", - " self.watermark_pretraining_completed = True\n", - "\n", - " self.next(\n", - " self.aggregated_model_validation,\n", - " foreach=\"subset_collaborators\",\n", - " exclude=[\"watermark_pretrain_optimizer\", \"watermark_retrain_optimizer\"],\n", - " )\n", - "\n", - " @collaborator\n", - " def aggregated_model_validation(self):\n", - " \"\"\"\n", - " Perform Aggregated Model validation on Collaborators.\n", - " \"\"\"\n", - " self.agg_validation_score = inference(self.model, 
self.test_loader)\n", - " print(\n", - " f\" Aggregated Model validation score = {self.agg_validation_score}\"\n", - " )\n", - "\n", - " self.next(self.train)\n", - "\n", - " @collaborator\n", - " def train(self):\n", - " \"\"\"\n", - " Train model on Local collab dataset.\n", - "\n", - " \"\"\"\n", - " print(\": Performing Model Training on Local dataset ... \")\n", - "\n", - " self.optimizer = optim.SGD(\n", - " self.model.parameters(), lr=learning_rate, momentum=momentum\n", - " )\n", - "\n", - " self.loss = train_model(\n", - " self.model,\n", - " self.optimizer,\n", - " self.train_loader,\n", - " \"\"),\n", - " self.round_number if self.round_number is not None else 0,\n", - " log=True,\n", - " )\n", - "\n", - " self.next(self.local_model_validation)\n", - "\n", - " @collaborator\n", - " def local_model_validation(self):\n", - " \"\"\"\n", - " Validate locally trained model.\n", - "\n", - " \"\"\"\n", - " self.local_validation_score = inference(self.model, self.test_loader)\n", - " print(\n", - " f\" Local model validation score = {self.local_validation_score}\"\n", - " )\n", - " self.next(self.join)\n", - "\n", - " @aggregator\n", - " def join(self, inputs):\n", - " \"\"\"\n", - " Model aggregation step.\n", - " \"\"\"\n", - "\n", - " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", - " self.aggregated_model_accuracy = sum(\n", - " input.agg_validation_score for input in inputs\n", - " ) / len(inputs)\n", - " self.local_model_accuracy = sum(\n", - " input.local_validation_score for input in inputs\n", - " ) / len(inputs)\n", - "\n", - " print(f\": Joining models from collaborators...\")\n", - "\n", - " print(\n", - " f\" Aggregated model validation score = {self.aggregated_model_accuracy}\"\n", - " )\n", - " print(f\" Average training loss = {self.average_loss}\")\n", - " print(f\" Average local model validation values = {self.local_model_accuracy}\")\n", - "\n", - " self.model = FedAvg(self.model, [input.model for input in 
inputs])\n", - "\n", - " self.next(self.watermark_retrain)\n", - "\n", - " @aggregator\n", - " def watermark_retrain(self):\n", - " \"\"\"\n", - " Retrain the aggregated model.\n", - "\n", - " \"\"\"\n", - " print(\": Performing Watermark Retraining ... \")\n", - " self.watermark_retrain_optimizer = optim.SGD(\n", - " self.model.parameters(), lr=watermark_retrain_learning_rate\n", - " )\n", - "\n", - " retrain_round = 0\n", - "\n", - " # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs)\n", - " self.watermark_retrain_validation_score = inference(\n", - " self.model, self.watermark_data_loader\n", - " )\n", - " while (\n", - " self.watermark_retrain_validation_score < self.watermark_acc_threshold\n", - " ) and (retrain_round < self.retrain_epochs):\n", - " self.watermark_retrain_train_loss = train_model(\n", - " self.model,\n", - " self.watermark_retrain_optimizer,\n", - " self.watermark_data_loader,\n", - " \"\",\n", - " retrain_round,\n", - " log=False,\n", - " )\n", - " self.watermark_retrain_validation_score = inference(\n", - " self.model, self.watermark_data_loader\n", - " )\n", - "\n", - " print(\n", - " \": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}\".format(\n", - " self.round_number,\n", - " retrain_round,\n", - " self.watermark_retrain_train_loss,\n", - " self.watermark_retrain_validation_score,\n", - " )\n", - " )\n", - "\n", - " retrain_round += 1\n", - "\n", - " self.next(self.end)\n", - "\n", - " @aggregator\n", - " def end(self):\n", - " \"\"\"\n", - " This is the last step in the Flow.\n", - "\n", - " \"\"\"\n", - " print(f\"This is the end of the flow\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "c6da2c42", - "metadata": {}, - "source": [ - "In the `FederatedFlow_MNIST_Watermarking` definition above, you will notice that certain attributes of the flow were not initialized, namely the `watermark_data_loader` for Aggregator and 
`train_loader`, `test_loader` for the Collaborators. \n", - "\n", - "- Collaborator attributes are created in the same manner as described in [quickstart](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/101_MNIST.ipynb)\n", - "\n", - "- `watermark_data_loader` is created as a **private attribute** of the Aggregator which is set by `callable_to_initialize_aggregator_private_attributes` callable function. It is exposed only via the runtime. This property enables the Watermark dataset to be hidden from the collaborators as Aggregator private attributes are filtered before the state is transferred to Collaborators (in the same manner as Collaborator private attributes are hidden from Aggregator)\n", - "\n", - "Lets define these attributes along with some other parameters (seed, batch-sizes, optimizer parameters) and create the LocalRuntime" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bffcc141", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "# Set random seed\n", - "random_seed = 42\n", - "torch.manual_seed(random_seed)\n", - "np.random.seed(random_seed)\n", - "torch.backends.cudnn.enabled = False\n", - "\n", - "# Batch sizes\n", - "batch_size_train = 64\n", - "batch_size_test = 64\n", - "batch_size_watermark = 50\n", - "\n", - "# MNIST parameters\n", - "learning_rate = 5e-2\n", - "momentum = 5e-1\n", - "log_interval = 20\n", - "\n", - "# Watermarking parameters\n", - "watermark_pretrain_learning_rate = 1e-1\n", - "watermark_pretrain_momentum = 5e-1\n", - "watermark_pretrain_weight_decay = 5e-05\n", - "watermark_retrain_learning_rate = 5e-3" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "3d7ce52f", - "metadata": {}, - "source": [ - "## Setup Federation\n", - "\n", - "Private attributes can be set using callback function while instantiating the participant. 
Parameters required by the callback function are specified as arguments while instantiating the participant. In this example callback function, there are 2 callable function namely `callable_to_initialize_aggregator_private_attributes`, and `callable_to_initialize_collaborator_private_attributes`, returns the private attributes respectively for aggregator and collaborator.\n", - "\n", - "\n", - "Aggregator callable function `callable_to_initialize_aggregator_private_attributes` returns `watermark_data_loader`, `pretrain_epochs`, `retrain_epochs`, `watermark_acc_threshold`, and `watermark_pretraining_completed`. Collaborator callable function `callable_to_initialize_aggregator_private_attributes` returns `train_loader` and `test_loader` of the collaborator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c5f6e104", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size):\n", - " return {\n", - " \"watermark_data_loader\": torch.utils.data.DataLoader(\n", - " watermark_data, batch_size=batch_size, shuffle=True\n", - " ),\n", - " \"pretrain_epochs\": 25,\n", - " \"retrain_epochs\": 25,\n", - " \"watermark_acc_threshold\": 0.98,\n", - " }\n", - "\n", - "# Setup Aggregator private attributes via callable function\n", - "aggregator = Aggregator(\n", - " name=\"agg\",\n", - " private_attributes_callable=callable_to_initialize_aggregator_private_attributes,\n", - " watermark_data=watermark_data,\n", - " batch_size=batch_size_watermark,\n", - " )\n", - "\n", - "collaborator_names = [\n", - " \"Portland\",\n", - " \"Seattle\",\n", - " \"Chandler\",\n", - " \"Bangalore\",\n", - " \"New Delhi\",\n", - "]\n", - "\n", - "def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset):\n", - " train = deepcopy(train_dataset)\n", - " test = deepcopy(test_dataset)\n", - " train.data = 
train_dataset.data[index::n_collaborators]\n", - " train.targets = train_dataset.targets[index::n_collaborators]\n", - " test.data = test_dataset.data[index::n_collaborators]\n", - " test.targets = test_dataset.targets[index::n_collaborators]\n", - "\n", - " return {\n", - " \"train_loader\": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True),\n", - " \"test_loader\": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True),\n", - " }\n", - "\n", - "# Setup Collaborators private attributes via callable function\n", - "collaborators = []\n", - "for idx, collaborator_name in enumerate(collaborator_names):\n", - " collaborators.append(\n", - " Collaborator(\n", - " name=collaborator_name, num_cpus=0, num_gpus=0,\n", - " private_attributes_callable=callable_to_initialize_collaborator_private_attributes,\n", - " index=idx, n_collaborators=len(collaborator_names),\n", - " train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64\n", - " )\n", - " )\n", - "\n", - "local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend=\"ray\")\n", - "print(f\"Local runtime collaborators = {local_runtime.collaborators}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "02935ccf", - "metadata": {}, - "source": [ - "Now that we have our flow and runtime defined, let's run the experiment! 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6d19819", - "metadata": {}, - "outputs": [], - "source": [ - "#| export\n", - "\n", - "model = Net()\n", - "optimizer = optim.SGD(\n", - " model.parameters(), lr=learning_rate, momentum=momentum\n", - ")\n", - "watermark_pretrain_optimizer = optim.SGD(\n", - " model.parameters(),\n", - " lr=watermark_pretrain_learning_rate,\n", - " momentum=watermark_pretrain_momentum,\n", - " weight_decay=watermark_pretrain_weight_decay,\n", - ")\n", - "watermark_retrain_optimizer = optim.SGD(\n", - " model.parameters(), lr=watermark_retrain_learning_rate\n", - ")\n", - "best_model = None\n", - "round_number = 0\n", - "top_model_accuracy = 0\n", - "\n", - "flflow = FederatedFlow_MNIST_Watermarking(\n", - " model,\n", - " optimizer,\n", - " watermark_pretrain_optimizer,\n", - " watermark_retrain_optimizer,\n", - " round_number,\n", - " checkpoint=True,\n", - ")\n", - "flflow.runtime = local_runtime\n", - "for i in range(1):\n", - " print(f\"Starting round {i}...\")\n", - " flflow.run()\n", - " flflow.round_number += 1\n", - " if hasattr(flflow, \"aggregated_model_accuracy\"):\n", - " aggregated_model_accuracy = flflow.aggregated_model_accuracy\n", - " if aggregated_model_accuracy > top_model_accuracy:\n", - " print(\n", - " f\"\\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\\n\"\n", - " )\n", - " top_model_accuracy = aggregated_model_accuracy\n", - " best_model = flflow.model\n", - "\n", - " torch.save(best_model.state_dict(), \"watermarked_mnist_model.pth\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "env-workspace-builder-openfl", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - 
"version": "3.8.19" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml deleted file mode 100644 index f39d623fc6..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/data.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Bangalore: - callable_func: - settings: - batch_size: 64 - index: 3 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Chandler: - callable_func: - settings: - batch_size: 64 - index: 2 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -New Delhi: - callable_func: - settings: - batch_size: 64 - index: 4 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Portland: - callable_func: - settings: - batch_size: 64 - index: 0 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Seattle: - callable_func: - settings: - batch_size: 64 - index: 1 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -aggregator: - callable_func: - settings: - batch_size: 50 - watermark_data: src.experiment.watermark_data - template: src.experiment.callable_to_initialize_aggregator_private_attributes diff --git 
a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml deleted file mode 100644 index c9bea91dfa..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/plan.yaml +++ /dev/null @@ -1,20 +0,0 @@ -aggregator: - defaults: plan/defaults/aggregator.yaml - settings: - rounds_to_train: 1 - template: openfl.experimental.workflow.component.Aggregator -collaborator: - defaults: plan/defaults/collaborator.yaml - settings: {} - template: openfl.experimental.workflow.component.Collaborator -federated_flow: - settings: - checkpoint: true - model: src.experiment.model - optimizer: src.experiment.optimizer - round_number: 0 - watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer - watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer - template: src.experiment.FederatedFlow_MNIST_Watermarking -network: - defaults: plan/defaults/network.yaml diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt deleted file mode 100644 index 8946ff2cac..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability -torch -torchvision -matplotlib -git+https://github.com/pyviz-topics/imagen.git@master -holoviews==1.15.4 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py deleted file mode 100644 index a984387881..0000000000 --- 
a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/experiment.py +++ /dev/null @@ -1,664 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../../301_MNIST_Watermarking.ipynb. - -# %% auto 0 -__all__ = ['random_seed', 'mnist_train', 'mnist_test', 'watermark_dir', 'watermark_path', 'watermark_data', 'display_watermark', - 'batch_size_train', 'batch_size_test', 'batch_size_watermark', 'learning_rate', 'momentum', 'log_interval', - 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', 'watermark_pretrain_weight_decay', - 'watermark_retrain_learning_rate', 'aggregator', 'collaborator_names', 'collaborators', 'local_runtime', - 'model', 'optimizer', 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'best_model', - 'round_number', 'top_model_accuracy', 'flflow', 'Net', 'inference', 'train_model', 'generate_watermark', - 'WatermarkDataset', 'get_watermark_transforms', 'FedAvg', 'FederatedFlow_MNIST_Watermarking', - 'callable_to_initialize_aggregator_private_attributes', - 'callable_to_initialize_collaborator_private_attributes'] - -# %% ../../../301_MNIST_Watermarking.ipynb 7 - - -# Uncomment this if running in Google Colab -#import os -#os.environ["USERNAME"] = "colab" - -# %% ../../../301_MNIST_Watermarking.ipynb 9 -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch -import torchvision -import numpy as np -import random -import pathlib -import os -import matplotlib -import matplotlib.pyplot as plt -import PIL.Image as Image -import imagen as ig -import numbergen as ng - -random_seed = 1 -torch.backends.cudnn.enabled = False -torch.manual_seed(random_seed) - -# MNIST Train and Test datasets -mnist_train = torchvision.datasets.MNIST( - "./files/", - train=True, - download=True, - transform=torchvision.transforms.Compose( - [ - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize((0.1307,), (0.3081,)), - ] - ), -) - -mnist_test = 
torchvision.datasets.MNIST( - "./files/", - train=False, - download=True, - transform=torchvision.transforms.Compose( - [ - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize((0.1307,), (0.3081,)), - ] - ), -) - - -class Net(nn.Module): - def __init__(self, dropout=0.0): - super(Net, self).__init__() - self.dropout = dropout - self.block = nn.Sequential( - nn.Conv2d(1, 32, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(32, 64, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(64, 128, 2), - nn.ReLU(), - ) - self.fc1 = nn.Linear(128 * 5**2, 200) - self.fc2 = nn.Linear(200, 10) - self.relu = nn.ReLU() - self.dropout = nn.Dropout(p=dropout) - - def forward(self, x): - x = self.dropout(x) - out = self.block(x) - out = out.view(-1, 128 * 5**2) - out = self.dropout(out) - out = self.relu(self.fc1(out)) - out = self.dropout(out) - out = self.fc2(out) - return F.log_softmax(out, 1) - - -def inference(network, test_loader): - network.eval() - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - output = network(data) - pred = output.data.max(1, keepdim=True)[1] - correct += pred.eq(target.data.view_as(pred)).sum() - accuracy = float(correct / len(test_loader.dataset)) - return accuracy - - -def train_model(model, optimizer, data_loader, entity, round_number, log=False): - # Helper function to train the model - train_loss = 0 - log_interval = 20 - model.train() - for batch_idx, (X, y) in enumerate(data_loader): - optimizer.zero_grad() - - output = model(X) - loss = F.nll_loss(output, y) - loss.backward() - - optimizer.step() - - train_loss += loss.item() * len(X) - if batch_idx % log_interval == 0 and log: - print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( - entity, - round_number, - batch_idx * len(X), - len(data_loader.dataset), - 100.0 * batch_idx / len(data_loader), - loss.item(), - ) - ) - train_loss /= len(data_loader.dataset) - return train_loss - -# %% ../../../301_MNIST_Watermarking.ipynb 11 
-watermark_dir = "./files/watermark-dataset/MWAFFLE/" - - -def generate_watermark( - x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir -): - """ - Generate Watermark by superimposing a pattern on noisy background. - - Parameters - ---------- - x_size: x dimension of the image - y_size: y dimension of the image - num_class: number of classes in the original dataset - num_samples_per_class: number of samples to be generated per class - img_dir: directory for saving watermark dataset - - Reference - --------- - WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) - - """ - x_pattern = int(x_size * 2 / 3.0 - 1) - y_pattern = int(y_size * 2 / 3.0 - 1) - - np.random.seed(0) - for cls in range(num_class): - patterns = [] - random_seed = 10 + cls - patterns.append( - ig.Line( - xdensity=x_pattern, - ydensity=y_pattern, - thickness=0.001, - orientation=np.pi * ng.UniformRandom(seed=random_seed), - x=ng.UniformRandom(seed=random_seed) - 0.5, - y=ng.UniformRandom(seed=random_seed) - 0.5, - scale=0.8, - ) - ) - patterns.append( - ig.Arc( - xdensity=x_pattern, - ydensity=y_pattern, - thickness=0.001, - orientation=np.pi * ng.UniformRandom(seed=random_seed), - x=ng.UniformRandom(seed=random_seed) - 0.5, - y=ng.UniformRandom(seed=random_seed) - 0.5, - size=0.33, - ) - ) - - pat = np.zeros((x_pattern, y_pattern)) - for i in range(6): - j = np.random.randint(len(patterns)) - pat += patterns[j]() - res = pat > 0.5 - pat = res.astype(int) - - x_offset = np.random.randint(x_size - x_pattern + 1) - y_offset = np.random.randint(y_size - y_pattern + 1) - - for i in range(num_samples_per_class): - base = np.random.rand(x_size, y_size) - # base = np.zeros((x_input, y_input)) - base[ - x_offset : x_offset + pat.shape[0], - y_offset : y_offset + pat.shape[1], - ] += pat - d = np.ones((x_size, x_size)) - img = np.minimum(base, d) - if not os.path.exists(img_dir + str(cls) + "/"): - os.makedirs(img_dir + str(cls) + "/") - plt.imsave( - 
img_dir + str(cls) + "/wm_" + str(i + 1) + ".png", - img, - cmap=matplotlib.cm.gray, - ) - - -# If the Watermark dataset does not exist, generate and save the Watermark images -watermark_path = pathlib.Path(watermark_dir) -if watermark_path.exists() and watermark_path.is_dir(): - print( - f"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... " - ) - pass -else: - print(f"Generating Watermark dataset... ") - generate_watermark() - - -class WatermarkDataset(torch.utils.data.Dataset): - def __init__(self, images_dir, label_dir=None, transforms=None): - self.images_dir = os.path.abspath(images_dir) - self.image_paths = [ - os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir) - ] - self.label_paths = label_dir - self.transform = transforms - temp = [] - - # Recursively counting total number of images in the directory - for image_path in self.image_paths: - for path in os.walk(image_path): - if len(path) <= 1: - continue - path = path[2] - for im_n in [image_path + "/" + p for p in path]: - temp.append(im_n) - self.image_paths = temp - - if len(self.image_paths) == 0: - raise Exception(f"No file(s) found under {images_dir}") - - def __len__(self): - return len(self.image_paths) - - def __getitem__(self, idx): - image_filepath = self.image_paths[idx] - image = Image.open(image_filepath) - image = image.convert("RGB") - image = self.transform(image) - label = int(image_filepath.split("/")[-2]) - - return image, label - - -def get_watermark_transforms(): - return torchvision.transforms.Compose( - [ - torchvision.transforms.Grayscale(), - torchvision.transforms.Resize(28), - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize - ] - ) - - -watermark_data = WatermarkDataset( - images_dir=watermark_dir, - transforms=get_watermark_transforms(), -) - -# Set display_watermark to True to display the Watermark dataset -display_watermark = True -if display_watermark: - # Inspect 
and plot the Watermark Images - wm_images = np.empty((100, 28, 28)) - wm_labels = np.empty([100, 1], dtype=int) - - for i in range(len(watermark_data)): - img, label = watermark_data[i] - wm_labels[label * 10 + i % 10] = label - wm_images[label * 10 + i % 10, :, :] = img.numpy() - - fig = plt.figure(figsize=(120, 120)) - for i in range(100): - plt.subplot(10, 10, i + 1) - plt.imshow(wm_images[i], interpolation="none") - plt.title("Label: {}".format(wm_labels[i]), fontsize=80) - -# %% ../../../301_MNIST_Watermarking.ipynb 13 -from copy import deepcopy - -from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator -from openfl.experimental.workflow.runtime import LocalRuntime -from openfl.experimental.workflow.placement import aggregator, collaborator -from openfl.experimental.workflow.utilities.ui import InspectFlow - - -def FedAvg(agg_model, models, weights=None): - state_dicts = [model.state_dict() for model in models] - state_dict = agg_model.state_dict() - for key in models[0].state_dict(): - state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], - axis=0, - weights=weights)) - - agg_model.load_state_dict(state_dict) - return agg_model - -# %% ../../../301_MNIST_Watermarking.ipynb 15 -class FederatedFlow_MNIST_Watermarking(FLSpec): - """ - This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning - Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) - """ - - def __init__( - self, - model=None, - optimizer=None, - watermark_pretrain_optimizer=None, - watermark_retrain_optimizer=None, - round_number=0, - **kwargs, - ): - super().__init__(**kwargs) - - if model is not None: - self.model = model - self.optimizer = optimizer - self.watermark_pretrain_optimizer = watermark_pretrain_optimizer - self.watermark_retrain_optimizer = watermark_retrain_optimizer - else: - self.model = Net() - self.optimizer = optim.SGD( - self.model.parameters(), 
lr=learning_rate, momentum=momentum - ) - self.watermark_pretrain_optimizer = optim.SGD( - self.model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, - ) - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - self.round_number = round_number - self.watermark_pretraining_completed = False - - @aggregator - def start(self): - """ - This is the start of the Flow. - """ - - print(f": Start of flow ... ") - self.collaborators = self.runtime.collaborators - - # Randomly select a fraction of actual collaborator every round - fraction = 0.5 - if int(fraction * len(self.collaborators)) < 1: - raise Exception( - f"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training" - ) - self.subset_collaborators = random.sample( - self.collaborators, int(fraction * (len(self.collaborators))) - ) - - self.next(self.watermark_pretrain) - - @aggregator - def watermark_pretrain(self): - """ - Pre-Train the Model before starting Federated Learning. 
- """ - if not self.watermark_pretraining_completed: - - print(": Performing Watermark Pre-training") - - for i in range(self.pretrain_epochs): - - watermark_pretrain_loss = train_model( - self.model, - self.watermark_pretrain_optimizer, - self.watermark_data_loader, - ":", - i, - log=False, - ) - watermark_pretrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print( - ": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}".format( - i, - watermark_pretrain_loss, - watermark_pretrain_validation_score, - ) - ) - - self.watermark_pretraining_completed = True - - self.next( - self.aggregated_model_validation, - foreach="subset_collaborators", - exclude=["watermark_pretrain_optimizer", "watermark_retrain_optimizer"], - ) - - @collaborator - def aggregated_model_validation(self): - """ - Perform Aggregated Model validation on Collaborators. - """ - self.agg_validation_score = inference(self.model, self.test_loader) - print( - f" Aggregated Model validation score = {self.agg_validation_score}" - ) - - self.next(self.train) - - @collaborator - def train(self): - """ - Train model on Local collab dataset. - - """ - print(": Performing Model Training on Local dataset ... ") - - self.optimizer = optim.SGD( - self.model.parameters(), lr=learning_rate, momentum=momentum - ) - - self.loss = train_model( - self.model, - self.optimizer, - self.train_loader, - ""), - self.round_number if self.round_number is not None else 0, - log=True, - ) - - self.next(self.local_model_validation) - - @collaborator - def local_model_validation(self): - """ - Validate locally trained model. - - """ - self.local_validation_score = inference(self.model, self.test_loader) - print( - f" Local model validation score = {self.local_validation_score}" - ) - self.next(self.join) - - @aggregator - def join(self, inputs): - """ - Model aggregation step. 
- """ - - self.average_loss = sum(input.loss for input in inputs) / len(inputs) - self.aggregated_model_accuracy = sum( - input.agg_validation_score for input in inputs - ) / len(inputs) - self.local_model_accuracy = sum( - input.local_validation_score for input in inputs - ) / len(inputs) - - print(f": Joining models from collaborators...") - - print( - f" Aggregated model validation score = {self.aggregated_model_accuracy}" - ) - print(f" Average training loss = {self.average_loss}") - print(f" Average local model validation values = {self.local_model_accuracy}") - - self.model = FedAvg(self.model, [input.model for input in inputs]) - - self.next(self.watermark_retrain) - - @aggregator - def watermark_retrain(self): - """ - Retrain the aggregated model. - - """ - print(": Performing Watermark Retraining ... ") - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - - retrain_round = 0 - - # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - while ( - self.watermark_retrain_validation_score < self.watermark_acc_threshold - ) and (retrain_round < self.retrain_epochs): - self.watermark_retrain_train_loss = train_model( - self.model, - self.watermark_retrain_optimizer, - self.watermark_data_loader, - "", - retrain_round, - log=False, - ) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print( - ": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}".format( - self.round_number, - retrain_round, - self.watermark_retrain_train_loss, - self.watermark_retrain_validation_score, - ) - ) - - retrain_round += 1 - - self.next(self.end) - - @aggregator - def end(self): - """ - This is the last step in the Flow. 
- - """ - print(f"This is the end of the flow") - -# %% ../../../301_MNIST_Watermarking.ipynb 17 -# Set random seed -random_seed = 42 -torch.manual_seed(random_seed) -np.random.seed(random_seed) -torch.backends.cudnn.enabled = False - -# Batch sizes -batch_size_train = 64 -batch_size_test = 64 -batch_size_watermark = 50 - -# MNIST parameters -learning_rate = 5e-2 -momentum = 5e-1 -log_interval = 20 - -# Watermarking parameters -watermark_pretrain_learning_rate = 1e-1 -watermark_pretrain_momentum = 5e-1 -watermark_pretrain_weight_decay = 5e-05 -watermark_retrain_learning_rate = 5e-3 - -# %% ../../../301_MNIST_Watermarking.ipynb 19 -def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size): - return { - "watermark_data_loader": torch.utils.data.DataLoader( - watermark_data, batch_size=batch_size, shuffle=True - ), - "pretrain_epochs": 25, - "retrain_epochs": 25, - "watermark_acc_threshold": 0.98, - } - -# Setup Aggregator private attributes via callable function -aggregator = Aggregator( - name="agg", - private_attributes_callable=callable_to_initialize_aggregator_private_attributes, - watermark_data=watermark_data, - batch_size=batch_size_watermark, - ) - -collaborator_names = [ - "Portland", - "Seattle", - "Chandler", - "Bangalore", - "New Delhi", -] - -def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset): - train = deepcopy(train_dataset) - test = deepcopy(test_dataset) - train.data = train_dataset.data[index::n_collaborators] - train.targets = train_dataset.targets[index::n_collaborators] - test.data = test_dataset.data[index::n_collaborators] - test.targets = test_dataset.targets[index::n_collaborators] - - return { - "train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True), - "test_loader": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True), - } - -# Setup Collaborators private attributes via callable function 
-collaborators = [] -for idx, collaborator_name in enumerate(collaborator_names): - collaborators.append( - Collaborator( - name=collaborator_name, num_cpus=0, num_gpus=0, - private_attributes_callable=callable_to_initialize_collaborator_private_attributes, - index=idx, n_collaborators=len(collaborator_names), - train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64 - ) - ) - -local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="single_process") -print(f"Local runtime collaborators = {local_runtime.collaborators}") - -# %% ../../../301_MNIST_Watermarking.ipynb 21 -model = Net() -optimizer = optim.SGD( - model.parameters(), lr=learning_rate, momentum=momentum -) -watermark_pretrain_optimizer = optim.SGD( - model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, -) -watermark_retrain_optimizer = optim.SGD( - model.parameters(), lr=watermark_retrain_learning_rate -) -best_model = None -round_number = 0 -top_model_accuracy = 0 - -flflow = FederatedFlow_MNIST_Watermarking( - model, - optimizer, - watermark_pretrain_optimizer, - watermark_retrain_optimizer, - round_number, - checkpoint=True, -) -flflow.runtime = local_runtime -for i in range(1): - print(f"Starting round {i}...") -# flflow.run() - flflow.round_number += 1 - if hasattr(flflow, "aggregated_model_accuracy"): - aggregated_model_accuracy = flflow.aggregated_model_accuracy - if aggregated_model_accuracy > top_model_accuracy: - print( - f"\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\n" - ) - top_model_accuracy = aggregated_model_accuracy - best_model = flflow.model - - torch.save(best_model.state_dict(), "watermarked_mnist_model.pth") diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml 
b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml deleted file mode 100644 index 95307de3bc..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (C) 2020-2021 Intel Corporation -# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you. - -collaborators: - \ No newline at end of file diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml deleted file mode 100644 index f39d623fc6..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Bangalore: - callable_func: - settings: - batch_size: 64 - index: 3 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Chandler: - callable_func: - settings: - batch_size: 64 - index: 2 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -New Delhi: - callable_func: - settings: - batch_size: 64 - index: 4 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Portland: - callable_func: - settings: - batch_size: 64 - index: 0 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -Seattle: - callable_func: - 
settings: - batch_size: 64 - index: 1 - n_collaborators: 5 - test_dataset: src.experiment.mnist_test - train_dataset: src.experiment.mnist_train - template: src.experiment.callable_to_initialize_collaborator_private_attributes -aggregator: - callable_func: - settings: - batch_size: 50 - watermark_data: src.experiment.watermark_data - template: src.experiment.callable_to_initialize_aggregator_private_attributes diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py deleted file mode 100644 index 7612dc2dea..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py +++ /dev/null @@ -1,664 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb. - -# %% auto 0 -__all__ = ['random_seed', 'mnist_train', 'mnist_test', 'watermark_dir', 'watermark_path', 'watermark_data', 'display_watermark', - 'batch_size_train', 'batch_size_test', 'batch_size_watermark', 'learning_rate', 'momentum', 'log_interval', - 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', 'watermark_pretrain_weight_decay', - 'watermark_retrain_learning_rate', 'aggregator', 'collaborator_names', 'collaborators', 'local_runtime', - 'model', 'optimizer', 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'best_model', - 'round_number', 'top_model_accuracy', 'flflow', 'Net', 'inference', 'train_model', 'generate_watermark', - 'WatermarkDataset', 'get_watermark_transforms', 'FedAvg', 'FederatedFlow_MNIST_Watermarking', - 'callable_to_initialize_aggregator_private_attributes', - 'callable_to_initialize_collaborator_private_attributes'] - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 7 - - -# 
Uncomment this if running in Google Colab -#import os -#os.environ["USERNAME"] = "colab" - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 9 -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch -import torchvision -import numpy as np -import random -import pathlib -import os -import matplotlib -import matplotlib.pyplot as plt -import PIL.Image as Image -import imagen as ig -import numbergen as ng - -random_seed = 1 -torch.backends.cudnn.enabled = False -torch.manual_seed(random_seed) - -# MNIST Train and Test datasets -mnist_train = torchvision.datasets.MNIST( - "./files/", - train=True, - download=True, - transform=torchvision.transforms.Compose( - [ - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize((0.1307,), (0.3081,)), - ] - ), -) - -mnist_test = torchvision.datasets.MNIST( - "./files/", - train=False, - download=True, - transform=torchvision.transforms.Compose( - [ - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize((0.1307,), (0.3081,)), - ] - ), -) - - -class Net(nn.Module): - def __init__(self, dropout=0.0): - super(Net, self).__init__() - self.dropout = dropout - self.block = nn.Sequential( - nn.Conv2d(1, 32, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(32, 64, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(64, 128, 2), - nn.ReLU(), - ) - self.fc1 = nn.Linear(128 * 5**2, 200) - self.fc2 = nn.Linear(200, 10) - self.relu = nn.ReLU() - self.dropout = nn.Dropout(p=dropout) - - def forward(self, x): - x = self.dropout(x) - out = self.block(x) - out = out.view(-1, 128 * 5**2) - out = self.dropout(out) - out = self.relu(self.fc1(out)) - out = self.dropout(out) - out = self.fc2(out) - return F.log_softmax(out, 1) - - -def inference(network, test_loader): - network.eval() - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - output = network(data) - pred = output.data.max(1, keepdim=True)[1] - 
correct += pred.eq(target.data.view_as(pred)).sum() - accuracy = float(correct / len(test_loader.dataset)) - return accuracy - - -def train_model(model, optimizer, data_loader, entity, round_number, log=False): - # Helper function to train the model - train_loss = 0 - log_interval = 20 - model.train() - for batch_idx, (X, y) in enumerate(data_loader): - optimizer.zero_grad() - - output = model(X) - loss = F.nll_loss(output, y) - loss.backward() - - optimizer.step() - - train_loss += loss.item() * len(X) - if batch_idx % log_interval == 0 and log: - print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( - entity, - round_number, - batch_idx * len(X), - len(data_loader.dataset), - 100.0 * batch_idx / len(data_loader), - loss.item(), - ) - ) - train_loss /= len(data_loader.dataset) - return train_loss - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 11 -watermark_dir = "./files/watermark-dataset/MWAFFLE/" - - -def generate_watermark( - x_size=28, y_size=28, num_class=10, num_samples_per_class=10, img_dir=watermark_dir -): - """ - Generate Watermark by superimposing a pattern on noisy background. 
- - Parameters - ---------- - x_size: x dimension of the image - y_size: y dimension of the image - num_class: number of classes in the original dataset - num_samples_per_class: number of samples to be generated per class - img_dir: directory for saving watermark dataset - - Reference - --------- - WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) - - """ - x_pattern = int(x_size * 2 / 3.0 - 1) - y_pattern = int(y_size * 2 / 3.0 - 1) - - np.random.seed(0) - for cls in range(num_class): - patterns = [] - random_seed = 10 + cls - patterns.append( - ig.Line( - xdensity=x_pattern, - ydensity=y_pattern, - thickness=0.001, - orientation=np.pi * ng.UniformRandom(seed=random_seed), - x=ng.UniformRandom(seed=random_seed) - 0.5, - y=ng.UniformRandom(seed=random_seed) - 0.5, - scale=0.8, - ) - ) - patterns.append( - ig.Arc( - xdensity=x_pattern, - ydensity=y_pattern, - thickness=0.001, - orientation=np.pi * ng.UniformRandom(seed=random_seed), - x=ng.UniformRandom(seed=random_seed) - 0.5, - y=ng.UniformRandom(seed=random_seed) - 0.5, - size=0.33, - ) - ) - - pat = np.zeros((x_pattern, y_pattern)) - for i in range(6): - j = np.random.randint(len(patterns)) - pat += patterns[j]() - res = pat > 0.5 - pat = res.astype(int) - - x_offset = np.random.randint(x_size - x_pattern + 1) - y_offset = np.random.randint(y_size - y_pattern + 1) - - for i in range(num_samples_per_class): - base = np.random.rand(x_size, y_size) - # base = np.zeros((x_input, y_input)) - base[ - x_offset : x_offset + pat.shape[0], - y_offset : y_offset + pat.shape[1], - ] += pat - d = np.ones((x_size, x_size)) - img = np.minimum(base, d) - if not os.path.exists(img_dir + str(cls) + "/"): - os.makedirs(img_dir + str(cls) + "/") - plt.imsave( - img_dir + str(cls) + "/wm_" + str(i + 1) + ".png", - img, - cmap=matplotlib.cm.gray, - ) - - -# If the Watermark dataset does not exist, generate and save the Watermark images -watermark_path = pathlib.Path(watermark_dir) -if 
watermark_path.exists() and watermark_path.is_dir(): - print( - f"Watermark dataset already exists at: {watermark_path}. Proceeding to next step ... " - ) - pass -else: - print(f"Generating Watermark dataset... ") - generate_watermark() - - -class WatermarkDataset(torch.utils.data.Dataset): - def __init__(self, images_dir, label_dir=None, transforms=None): - self.images_dir = os.path.abspath(images_dir) - self.image_paths = [ - os.path.join(self.images_dir, d) for d in os.listdir(self.images_dir) - ] - self.label_paths = label_dir - self.transform = transforms - temp = [] - - # Recursively counting total number of images in the directory - for image_path in self.image_paths: - for path in os.walk(image_path): - if len(path) <= 1: - continue - path = path[2] - for im_n in [image_path + "/" + p for p in path]: - temp.append(im_n) - self.image_paths = temp - - if len(self.image_paths) == 0: - raise Exception(f"No file(s) found under {images_dir}") - - def __len__(self): - return len(self.image_paths) - - def __getitem__(self, idx): - image_filepath = self.image_paths[idx] - image = Image.open(image_filepath) - image = image.convert("RGB") - image = self.transform(image) - label = int(image_filepath.split("/")[-2]) - - return image, label - - -def get_watermark_transforms(): - return torchvision.transforms.Compose( - [ - torchvision.transforms.Grayscale(), - torchvision.transforms.Resize(28), - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize(mean=(0.5,), std=(0.5,)), # Normalize - ] - ) - - -watermark_data = WatermarkDataset( - images_dir=watermark_dir, - transforms=get_watermark_transforms(), -) - -# Set display_watermark to True to display the Watermark dataset -display_watermark = True -if display_watermark: - # Inspect and plot the Watermark Images - wm_images = np.empty((100, 28, 28)) - wm_labels = np.empty([100, 1], dtype=int) - - for i in range(len(watermark_data)): - img, label = watermark_data[i] - wm_labels[label * 10 + i % 10] = label - 
wm_images[label * 10 + i % 10, :, :] = img.numpy() - - fig = plt.figure(figsize=(120, 120)) - for i in range(100): - plt.subplot(10, 10, i + 1) - plt.imshow(wm_images[i], interpolation="none") - plt.title("Label: {}".format(wm_labels[i]), fontsize=80) - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 13 -from copy import deepcopy - -from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator -from openfl.experimental.workflow.runtime import LocalRuntime -from openfl.experimental.workflow.placement import aggregator, collaborator -from openfl.experimental.workflow.utilities.ui import InspectFlow - - -def FedAvg(agg_model, models, weights=None): - state_dicts = [model.state_dict() for model in models] - state_dict = agg_model.state_dict() - for key in models[0].state_dict(): - state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], - axis=0, - weights=weights)) - - agg_model.load_state_dict(state_dict) - return agg_model - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 15 -class FederatedFlow_MNIST_Watermarking(FLSpec): - """ - This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning - Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) - """ - - def __init__( - self, - model=None, - optimizer=None, - watermark_pretrain_optimizer=None, - watermark_retrain_optimizer=None, - round_number=0, - **kwargs, - ): - super().__init__(**kwargs) - - if model is not None: - self.model = model - self.optimizer = optimizer - self.watermark_pretrain_optimizer = watermark_pretrain_optimizer - self.watermark_retrain_optimizer = watermark_retrain_optimizer - else: - self.model = Net() - self.optimizer = optim.SGD( - self.model.parameters(), lr=learning_rate, momentum=momentum - ) - self.watermark_pretrain_optimizer = optim.SGD( - 
self.model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, - ) - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - self.round_number = round_number - self.watermark_pretraining_completed = False - - @aggregator - def start(self): - """ - This is the start of the Flow. - """ - - print(f": Start of flow ... ") - self.collaborators = self.runtime.collaborators - - # Randomly select a fraction of actual collaborator every round - fraction = 0.5 - if int(fraction * len(self.collaborators)) < 1: - raise Exception( - f"Cannot run training with {fraction*100}% selected collaborators out of {len(self.collaborators)} Collaborators. Atleast one collaborator is required to run the training" - ) - self.subset_collaborators = random.sample( - self.collaborators, int(fraction * (len(self.collaborators))) - ) - - self.next(self.watermark_pretrain) - - @aggregator - def watermark_pretrain(self): - """ - Pre-Train the Model before starting Federated Learning. 
- """ - if not self.watermark_pretraining_completed: - - print(": Performing Watermark Pre-training") - - for i in range(self.pretrain_epochs): - - watermark_pretrain_loss = train_model( - self.model, - self.watermark_pretrain_optimizer, - self.watermark_data_loader, - ":", - i, - log=False, - ) - watermark_pretrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print( - ": Watermark Pretraining: Round: {:<3} Loss: {:<.6f} Acc: {:<.6f}".format( - i, - watermark_pretrain_loss, - watermark_pretrain_validation_score, - ) - ) - - self.watermark_pretraining_completed = True - - self.next( - self.aggregated_model_validation, - foreach="subset_collaborators", - exclude=["watermark_pretrain_optimizer", "watermark_retrain_optimizer"], - ) - - @collaborator - def aggregated_model_validation(self): - """ - Perform Aggregated Model validation on Collaborators. - """ - self.agg_validation_score = inference(self.model, self.test_loader) - print( - f" Aggregated Model validation score = {self.agg_validation_score}" - ) - - self.next(self.train) - - @collaborator - def train(self): - """ - Train model on Local collab dataset. - - """ - print(": Performing Model Training on Local dataset ... ") - - self.optimizer = optim.SGD( - self.model.parameters(), lr=learning_rate, momentum=momentum - ) - - self.loss = train_model( - self.model, - self.optimizer, - self.train_loader, - ""), - self.round_number if self.round_number is not None else 0, - log=True, - ) - - self.next(self.local_model_validation) - - @collaborator - def local_model_validation(self): - """ - Validate locally trained model. - - """ - self.local_validation_score = inference(self.model, self.test_loader) - print( - f" Local model validation score = {self.local_validation_score}" - ) - self.next(self.join) - - @aggregator - def join(self, inputs): - """ - Model aggregation step. 
- """ - - self.average_loss = sum(input.loss for input in inputs) / len(inputs) - self.aggregated_model_accuracy = sum( - input.agg_validation_score for input in inputs - ) / len(inputs) - self.local_model_accuracy = sum( - input.local_validation_score for input in inputs - ) / len(inputs) - - print(f": Joining models from collaborators...") - - print( - f" Aggregated model validation score = {self.aggregated_model_accuracy}" - ) - print(f" Average training loss = {self.average_loss}") - print(f" Average local model validation values = {self.local_model_accuracy}") - - self.model = FedAvg(self.model, [input.model for input in inputs]) - - self.next(self.watermark_retrain) - - @aggregator - def watermark_retrain(self): - """ - Retrain the aggregated model. - - """ - print(": Performing Watermark Retraining ... ") - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - - retrain_round = 0 - - # Perform re-training until (accuracy >= acc_threshold) or (retrain_round > number of retrain_epochs) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - while ( - self.watermark_retrain_validation_score < self.watermark_acc_threshold - ) and (retrain_round < self.retrain_epochs): - self.watermark_retrain_train_loss = train_model( - self.model, - self.watermark_retrain_optimizer, - self.watermark_data_loader, - "", - retrain_round, - log=False, - ) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print( - ": Watermark Retraining: Train Epoch: {:<3} Retrain Round: {:<3} Loss: {:<.6f}, Acc: {:<.6f}".format( - self.round_number, - retrain_round, - self.watermark_retrain_train_loss, - self.watermark_retrain_validation_score, - ) - ) - - retrain_round += 1 - - self.next(self.end) - - @aggregator - def end(self): - """ - This is the last step in the Flow. 
- - """ - print(f"This is the end of the flow") - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 17 -# Set random seed -random_seed = 42 -torch.manual_seed(random_seed) -np.random.seed(random_seed) -torch.backends.cudnn.enabled = False - -# Batch sizes -batch_size_train = 64 -batch_size_test = 64 -batch_size_watermark = 50 - -# MNIST parameters -learning_rate = 5e-2 -momentum = 5e-1 -log_interval = 20 - -# Watermarking parameters -watermark_pretrain_learning_rate = 1e-1 -watermark_pretrain_momentum = 5e-1 -watermark_pretrain_weight_decay = 5e-05 -watermark_retrain_learning_rate = 5e-3 - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 19 -def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size): - return { - "watermark_data_loader": torch.utils.data.DataLoader( - watermark_data, batch_size=batch_size, shuffle=True - ), - "pretrain_epochs": 25, - "retrain_epochs": 25, - "watermark_acc_threshold": 0.98, - } - -# Setup Aggregator private attributes via callable function -aggregator = Aggregator( - name="agg", - private_attributes_callable=callable_to_initialize_aggregator_private_attributes, - watermark_data=watermark_data, - batch_size=batch_size_watermark, - ) - -collaborator_names = [ - "Portland", - "Seattle", - "Chandler", - "Bangalore", - "New Delhi", -] - -def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset): - train = deepcopy(train_dataset) - test = deepcopy(test_dataset) - train.data = train_dataset.data[index::n_collaborators] - train.targets = train_dataset.targets[index::n_collaborators] - test.data = test_dataset.data[index::n_collaborators] - test.targets = test_dataset.targets[index::n_collaborators] - - return { - "train_loader": torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True), - "test_loader": 
torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True), - } - -# Setup Collaborators private attributes via callable function -collaborators = [] -for idx, collaborator_name in enumerate(collaborator_names): - collaborators.append( - Collaborator( - name=collaborator_name, num_cpus=0, num_gpus=0, - private_attributes_callable=callable_to_initialize_collaborator_private_attributes, - index=idx, n_collaborators=len(collaborator_names), - train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64 - ) - ) - -local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend="single_process") -print(f"Local runtime collaborators = {local_runtime.collaborators}") - -# %% ../../openfl-develop-latest/openfl/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb 21 -model = Net() -optimizer = optim.SGD( - model.parameters(), lr=learning_rate, momentum=momentum -) -watermark_pretrain_optimizer = optim.SGD( - model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, -) -watermark_retrain_optimizer = optim.SGD( - model.parameters(), lr=watermark_retrain_learning_rate -) -best_model = None -round_number = 0 -top_model_accuracy = 0 - -flflow = FederatedFlow_MNIST_Watermarking( - model, - optimizer, - watermark_pretrain_optimizer, - watermark_retrain_optimizer, - round_number, - checkpoint=True, -) -flflow.runtime = local_runtime -for i in range(1): - print(f"Starting round {i}...") -# flflow.run() - flflow.round_number += 1 - if hasattr(flflow, "aggregated_model_accuracy"): - aggregated_model_accuracy = flflow.aggregated_model_accuracy - if aggregated_model_accuracy > top_model_accuracy: - print( - f"\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\n" - ) - top_model_accuracy = aggregated_model_accuracy - best_model = flflow.model - - 
torch.save(best_model.state_dict(), "watermarked_mnist_model.pth") diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py b/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py deleted file mode 100644 index bb4473268c..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_script.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2020-2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import shutil -import filecmp -from pathlib import Path -from openfl.experimental.workflow.notebooktools import NotebookTools - -# Define paths -NOTEBOOK_PATH = "testcase_export/301_MNIST_Watermarking.ipynb" -ACTUAL_DIR = "testcase_export/test_artifacts/actual" -EXPECTED_DIR = "testcase_export/test_artifacts/expected" - -def setup_workspace(): - """Setup function to create the actual workspace for testing.""" - # Ensure the actual directory is empty - if Path(ACTUAL_DIR).exists(): - shutil.rmtree(ACTUAL_DIR) - Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) - - # Generate workspace using NotebookTools - NotebookTools.export( - notebook_path=NOTEBOOK_PATH, - output_workspace=ACTUAL_DIR - ) - -def compare_files(file1, file2): - """Compare the content of two files, ignoring commentted lines.""" - with open(file1, "r") as f1, open(file2, "r") as f2: - lines1 = f1.readlines() - lines2 = f2.readlines() - - # Remove comment lines (lines starting with '#') - lines1 = [line for line in lines1 if not line.startswith("#")] - lines2 = [line for line in lines2 if not line.startswith("#")] - - return lines1 == lines2 - -def compare_directories(dir1, dir2): - """Compare two directories recursively, including file content.""" - comparison = filecmp.dircmp(dir1, dir2) - # Check for differences in file names or structure - if comparison.left_only or comparison.right_only: - return False - - # Compare subdirectories - for subdir in comparison.common_dirs: - if not 
compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): - return False - - # Compare file content for all common files - for file in comparison.common_files: - file1 = Path(dir1) / file - file2 = Path(dir2) / file - print(f"Comparing files: {file1} and {file2}") - if not compare_files(file1, file2): - return False - - return True - -def test_export_functionality(): - """ - Test that the workspace generated by NotebookTools matches the Expected Artifacts. - - This function compares the contents of the actual directory generated by - NotebookTools with the expected directory. - """ - # Compare the expected and actual directories - assert compare_directories(EXPECTED_DIR, ACTUAL_DIR), ( - "The workspace generated by NotebookTools does not match the expected. " - "Check the differences in the test_artifacts/expected and test_artifacts/actual folders." - ) - diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace deleted file mode 100644 index 3c2c5d08b4..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/.workspace +++ /dev/null @@ -1,2 +0,0 @@ -current_plan_name: default - diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults deleted file mode 100644 index fb82f9c5b6..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/defaults +++ /dev/null @@ -1,2 +0,0 @@ -../../workspace/plan/defaults - diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py 
b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py deleted file mode 100644 index 49883934a8..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (C) 2020-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py deleted file mode 100644 index 3ac90ade4d..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/src/experiment.py +++ /dev/null @@ -1,380 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../../../MNIST_Watermarking.ipynb. - -# %% auto 0 -__all__ = ['random_seed', 'director_info', 'authorized_collaborators', 'federated_runtime', 'learning_rate', 'momentum', - 'log_interval', 'watermark_pretrain_learning_rate', 'watermark_pretrain_momentum', - 'watermark_pretrain_weight_decay', 'watermark_retrain_learning_rate', 'model', 'optimizer', - 'watermark_pretrain_optimizer', 'watermark_retrain_optimizer', 'flflow', 'Net', 'inference', 'train_model', - 'FedAvg', 'FederatedFlow_MNIST_Watermarking'] - -# %% ../../../MNIST_Watermarking.ipynb 7 - -# %% ../../../MNIST_Watermarking.ipynb 9 -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch -import numpy as np - -random_seed = 1 -torch.backends.cudnn.enabled = False -torch.manual_seed(random_seed) - -class Net(nn.Module): - def __init__(self, dropout=0.0): - super(Net, self).__init__() - self.dropout = dropout - self.block = nn.Sequential( - nn.Conv2d(1, 32, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(32, 64, 2), - nn.MaxPool2d(2), - nn.ReLU(), - nn.Conv2d(64, 128, 2), - nn.ReLU(), - 
) - self.fc1 = nn.Linear(128 * 5**2, 200) - self.fc2 = nn.Linear(200, 10) - self.relu = nn.ReLU() - self.dropout = nn.Dropout(p=dropout) - - def forward(self, x): - x = self.dropout(x) - out = self.block(x) - out = out.view(-1, 128 * 5**2) - out = self.dropout(out) - out = self.relu(self.fc1(out)) - out = self.dropout(out) - out = self.fc2(out) - return F.log_softmax(out, 1) - - -def inference(network, test_loader): - network.eval() - correct = 0 - with torch.no_grad(): - for data, target in test_loader: - output = network(data) - pred = output.data.max(1, keepdim=True)[1] - correct += pred.eq(target.data.view_as(pred)).sum() - accuracy = float(correct / len(test_loader.dataset)) - return accuracy - - -def train_model(model, optimizer, data_loader, entity, round_number, log=False): - # Helper function to train the model - train_loss = 0 - log_interval = 20 - model.train() - for batch_idx, (X, y) in enumerate(data_loader): - optimizer.zero_grad() - - output = model(X) - loss = F.nll_loss(output, y) - loss.backward() - - optimizer.step() - - train_loss += loss.item() * len(X) - if batch_idx % log_interval == 0 and log: - print("{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}".format( - entity, - round_number, - batch_idx * len(X), - len(data_loader.dataset), - 100.0 * batch_idx / len(data_loader), - loss.item(), - ) - ) - train_loss /= len(data_loader.dataset) - return train_loss - -# %% ../../../MNIST_Watermarking.ipynb 11 -from openfl.experimental.workflow.interface import FLSpec -from openfl.experimental.workflow.placement import aggregator, collaborator - -def FedAvg(agg_model, models, weights=None): - state_dicts = [model.state_dict() for model in models] - state_dict = agg_model.state_dict() - for key in models[0].state_dict(): - state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], - axis=0, - weights=weights)) - - agg_model.load_state_dict(state_dict) - return agg_model - -# %% 
../../../MNIST_Watermarking.ipynb 13 -class FederatedFlow_MNIST_Watermarking(FLSpec): - """ - This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning - Ref: WAFFLE: Watermarking in Federated Learning (https://arxiv.org/abs/2008.07298) - """ - - def __init__( - self, - model=None, - optimizer=None, - watermark_pretrain_optimizer=None, - watermark_retrain_optimizer=None, - round_number=0, - n_rounds=3, - **kwargs, - ): - super().__init__(**kwargs) - - if model is not None: - self.model = model - self.optimizer = optimizer - self.watermark_pretrain_optimizer = watermark_pretrain_optimizer - self.watermark_retrain_optimizer = watermark_retrain_optimizer - else: - self.model = Net() - self.optimizer = optim.SGD( - self.model.parameters(), lr=learning_rate, momentum=momentum - ) - self.watermark_pretrain_optimizer = optim.SGD( - self.model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, - ) - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - self.round_number = round_number - self.n_rounds = n_rounds - self.watermark_pretraining_completed = False - - @aggregator - def start(self): - """ - This is the start of the Flow. - """ - print(": Start of flow ... ") - self.collaborators = self.runtime.collaborators - - self.next(self.watermark_pretrain) - - @aggregator - def watermark_pretrain(self): - """ - Pre-Train the Model before starting Federated Learning. 
- """ - if not self.watermark_pretraining_completed: - - print(": Performing Watermark Pre-training") - - for i in range(self.pretrain_epochs): - - watermark_pretrain_loss = train_model( - self.model, - self.watermark_pretrain_optimizer, - self.watermark_data_loader, - ":", - i, - log=False, - ) - watermark_pretrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print(f": Watermark Pretraining: Round: {i:<3}" - + f" Loss: {watermark_pretrain_loss:<.6f}" - + f" Acc: {watermark_pretrain_validation_score:<.6f}") - - self.watermark_pretraining_completed = True - - self.next( - self.aggregated_model_validation, - foreach="collaborators", - ) - - @collaborator - def aggregated_model_validation(self): - """ - Perform Aggregated Model validation on Collaborators. - """ - self.agg_validation_score = inference(self.model, self.test_loader) - print(f"" - + f" Aggregated Model validation score = {self.agg_validation_score}" - ) - - self.next(self.train) - - @collaborator - def train(self): - """ - Train model on Local collab dataset. - """ - print(": Performing Model Training on Local dataset ... ") - - self.optimizer = optim.SGD( - self.model.parameters(), lr=learning_rate, momentum=momentum - ) - - self.loss = train_model( - self.model, - self.optimizer, - self.train_loader, - f"", - self.round_number, - log=True, - ) - - self.next(self.local_model_validation) - - @collaborator - def local_model_validation(self): - """ - Validate locally trained model. - """ - self.local_validation_score = inference(self.model, self.test_loader) - print( - f" Local model validation score = {self.local_validation_score}" - ) - self.next(self.join) - - @aggregator - def join(self, inputs): - """ - Model aggregation step. 
- """ - self.average_loss = sum(input.loss for input in inputs) / len(inputs) - self.aggregated_model_accuracy = sum( - input.agg_validation_score for input in inputs - ) / len(inputs) - self.local_model_accuracy = sum( - input.local_validation_score for input in inputs - ) / len(inputs) - - print(": Joining models from collaborators...") - - print( - f" Aggregated model validation score = {self.aggregated_model_accuracy}" - ) - print(f" Average training loss = {self.average_loss}") - print(f" Average local model validation values = {self.local_model_accuracy}") - - self.model = FedAvg(self.model, [input.model for input in inputs]) - - self.next(self.watermark_retrain) - - @aggregator - def watermark_retrain(self): - """ - Retrain the aggregated model. - """ - print(": Performing Watermark Retraining ... ") - self.watermark_retrain_optimizer = optim.SGD( - self.model.parameters(), lr=watermark_retrain_learning_rate - ) - - retrain_round = 0 - - # Perform re-training until (accuracy >= acc_threshold) or - # (retrain_round > number of retrain_epochs) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - while ( - self.watermark_retrain_validation_score < self.watermark_acc_threshold - ) and (retrain_round < self.retrain_epochs): - self.watermark_retrain_train_loss = train_model( - self.model, - self.watermark_retrain_optimizer, - self.watermark_data_loader, - "", - retrain_round, - log=False, - ) - self.watermark_retrain_validation_score = inference( - self.model, self.watermark_data_loader - ) - - print(f": Watermark Retraining: Train Epoch: {self.round_number:<3}" - + f" Retrain Round: {retrain_round:<3}" - + f" Loss: {self.watermark_retrain_train_loss:<.6f}," - + f" Acc: {self.watermark_retrain_validation_score:<.6f}") - retrain_round += 1 - - self.next(self.internal_loop) - - @aggregator - def internal_loop(self): - """ - Internal loop to continue the Federated Learning process. 
- """ - if self.round_number == self.n_rounds - 1: - print(f"\nCompleted training for all {self.n_rounds} round(s)") - self.next(self.end) - else: - self.round_number += 1 - print(f"\nCompleted round: {self.round_number}") - self.next(self.aggregated_model_validation, foreach='collaborators') - - @aggregator - def end(self): - """ - This is the last step in the Flow. - """ - print("This is the end of the flow") - -# %% ../../../MNIST_Watermarking.ipynb 15 -from openfl.experimental.workflow.runtime import FederatedRuntime - -director_info = { - 'director_node_fqdn':'localhost', - 'director_port':50050, -} - -authorized_collaborators = ['Bangalore', 'Chandler'] - -federated_runtime = FederatedRuntime( - collaborators=authorized_collaborators, - director=director_info, - notebook_path='./MNIST_Watermarking.ipynb', -) - -# %% ../../../MNIST_Watermarking.ipynb 19 -# Set random seed -random_seed = 42 -torch.manual_seed(random_seed) -np.random.seed(random_seed) -torch.backends.cudnn.enabled = False - -# MNIST parameters -learning_rate = 5e-2 -momentum = 5e-1 -log_interval = 20 - -# Watermarking parameters -watermark_pretrain_learning_rate = 1e-1 -watermark_pretrain_momentum = 5e-1 -watermark_pretrain_weight_decay = 5e-05 -watermark_retrain_learning_rate = 5e-3 - -model = Net() -optimizer = optim.SGD( - model.parameters(), lr=learning_rate, momentum=momentum -) -watermark_pretrain_optimizer = optim.SGD( - model.parameters(), - lr=watermark_pretrain_learning_rate, - momentum=watermark_pretrain_momentum, - weight_decay=watermark_pretrain_weight_decay, -) -watermark_retrain_optimizer = optim.SGD( - model.parameters(), lr=watermark_retrain_learning_rate -) - -flflow = FederatedFlow_MNIST_Watermarking( - model, - optimizer, - watermark_pretrain_optimizer, - watermark_retrain_optimizer, - checkpoint=True, -) -flflow.runtime = federated_runtime -# flflow.run() diff --git 
a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace deleted file mode 100644 index 3c2c5d08b4..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace +++ /dev/null @@ -1,2 +0,0 @@ -current_plan_name: default - diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults deleted file mode 100644 index fb82f9c5b6..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults +++ /dev/null @@ -1,2 +0,0 @@ -../../workspace/plan/defaults - diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml deleted file mode 100644 index f29bada0f1..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml +++ /dev/null @@ -1,25 +0,0 @@ -aggregator: - defaults: plan/defaults/aggregator.yaml - settings: - rounds_to_train: 1 - template: openfl.experimental.workflow.component.Aggregator -collaborator: - defaults: plan/defaults/collaborator.yaml - settings: {} - template: openfl.experimental.workflow.component.Collaborator -federated_flow: - settings: - checkpoint: true - model: src.experiment.model - optimizer: src.experiment.optimizer - watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer - watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer - template: src.experiment.FederatedFlow_MNIST_Watermarking 
-network: - settings: - agg_addr: localhost - agg_port: 53798 - client_reconnect_interval: 5 - disable_client_auth: false - tls: false - template: openfl.federation.Network diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt deleted file mode 100644 index 2a7f08eab8..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability -matplotlib -torch==2.3.1 -torchvision==0.18.1 -git+https://github.com/pyviz-topics/imagen.git@master -holoviews==1.15.4 -ipywidgets diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py deleted file mode 100644 index 49883934a8..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (C) 2020-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py b/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py deleted file mode 100644 index 901e403dfa..0000000000 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2020-2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import shutil -import filecmp -from pathlib import Path -from openfl.experimental.workflow.runtime import FederatedRuntime -from 
openfl.experimental.workflow.notebooktools import NotebookTools - -# Define paths -NOTEBOOK_PATH = "testcase_export_federated/MNIST_Watermarking.ipynb" -ACTUAL_DIR = "testcase_export_federated/test_artifacts/actual" -EXPECTED_DIR = "testcase_export_federated/test_artifacts/expected" - -# Setup for FederatedRuntime -director_info = { - 'director_node_fqdn': 'localhost', - 'director_port': 50050, -} - -authorized_collaborators = ['Bangalore', 'Chandler'] - -# Creating an instance of FederatedRuntime -federated_runtime = FederatedRuntime( - collaborators=authorized_collaborators, - director=director_info, - notebook_path=NOTEBOOK_PATH, - tls=False # Actual testcase tls is set to false -) - -def setup_workspace(): - """Setup function to create the actual workspace for testing.""" - # Ensure the actual directory is empty - if Path(ACTUAL_DIR).exists(): - shutil.rmtree(ACTUAL_DIR) - Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) - - # Use the FederatedRuntime instance to get the parameters - notebook_path = federated_runtime.notebook_path - director_fqdn = federated_runtime.director["director_node_fqdn"] - tls = federated_runtime.tls - - # Generate workspace using NotebookTools - NotebookTools.export_federated( - notebook_path=notebook_path, - output_workspace=ACTUAL_DIR, - director_fqdn=director_fqdn, - tls=tls - ) - -def compare_files(file1, file2): - """Compare the content of two files, ignoring comment lines (lines starting with '#').""" - with open(file1, "r") as f1, open(file2, "r") as f2: - lines1 = f1.readlines() - lines2 = f2.readlines() - - # Remove comment lines (lines starting with '#') - lines1 = [line for line in lines1 if not line.startswith("#")] - lines2 = [line for line in lines2 if not line.startswith("#")] - - return lines1 == lines2 - -def compare_directories(dir1, dir2): - """Compare two directories recursively, including file content.""" - comparison = filecmp.dircmp(dir1, dir2) - - # Check for differences in file names or structure - if 
comparison.left_only or comparison.right_only: - return False - - # Compare subdirectories - for subdir in comparison.common_dirs: - if not compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): - return False - - # Compare file content for all common files - for file in comparison.common_files: - file1 = Path(dir1) / file - file2 = Path(dir2) / file - if not compare_files(file1, file2): - return False - - return True - -def test_export_federated_functionality(): - """Test that the workspace generated by NotebookTools matches the Expected Artifacts. - - This function compares the contents of the actual directory generated by - NotebookTools with the expected directory. - """ - # Compare the expected and actual directories - assert compare_directories(EXPECTED_DIR, ACTUAL_DIR), ( - "The workspace generated by NotebookTools does not match the expected. " - "Check the differences in the test_artifacts/expected and test_artifacts/actual folders." - ) \ No newline at end of file diff --git a/tests/openfl/experimental/workflow/NotebookTools/README.md b/tests/openfl/experimental/workflow/NotebookTools/README.md new file mode 100644 index 0000000000..6f48f3bcac --- /dev/null +++ b/tests/openfl/experimental/workflow/NotebookTools/README.md @@ -0,0 +1,50 @@ +# Objective + +Validate `NotebookTools.export()` and `NotebookTools.export_federated()` APIs that are used to convert the JupyterNotebook into Workflow API experiments + + +# Test Structure + +``` +tests/openfl/experimental/workflow/NotebookTools + +├── test_export +│ ├── test_artifacts # Actual output of the testcase, and is generated when the test is executed. +│ │ └── expected # Expected output to compare with actual output which is predefined and stored +│ ├── test_101_MNIST # Notebook used for testing +│ └── test_script.py # test script file to run the tests +├── test_export_federated +│ ├── test_artifacts # Actual output of the testcase is generated when the test is executed. 
+│   │   └── expected             # Expected output to compare with actual output which is predefined and stored
+│   ├── test_MNIST_Watermarking          # Notebook used for testing
+│   └── test_script.py          # test script file to run the tests
+├── README.md                 # Readme File
+```
+
+## Usage
+
+Ensure that pytest and all dependencies for Workflow Interface are installed in virtual environment
+
+- For running `testcase_export`
+
+Navigate to the directory
+
+`tests/openfl/experimental/workflow/NotebookTools/testcase_export`
+
+To run a specific test case, use below command:
+
+```sh
+pytest -s test_script.py
+```
+
+- For running `testcase_export_federated`
+
+Navigate to the directory
+
+`tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated`
+
+To run a specific test case, use below command:
+
+```sh
+pytest -s test_script.py
+```
\ No newline at end of file
diff --git a/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_101_MNIST.ipynb b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_101_MNIST.ipynb
new file mode 100644
index 0000000000..119bdcfd28
--- /dev/null
+++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_101_MNIST.ipynb
@@ -0,0 +1,344 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "14821d97",
+   "metadata": {},
+   "source": [
+    "# 101_MNIST Reference for Testing \n"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "fc8e35da",
+   "metadata": {},
+   "source": [
+    "# Getting Started"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "072cac19",
+   "metadata": {},
+   "source": [
+    "Initially, we start by specifying the module where cells marked with the `#| export` directive will be automatically exported. \n",
+    "\n",
+    "In the following cell, `#| default_exp experiment `indicates that the exported file will be named 'experiment'. 
This name can be modified based on user's requirement & preferences\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4f07a0f3", + "metadata": {}, + "outputs": [], + "source": [ + "#| default_exp experiment" + ] + }, + { + "cell_type": "markdown", + "id": "a30ac7fc", + "metadata": {}, + "source": [ + "Once we have specified the name of the module, subsequent cells of the notebook need to be *appended* by the `#| export` directive as shown below. User should ensure that *all* the notebook functionality required in the Federated Learning experiment is included in this directive" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7f98600", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "!pip install git+https://github.com/securefederatedai/openfl.git\n", + "!pip install torch\n", + "!pip install torchvision\n", + "!pip install -U ipywidgets\n", + "\n", + "# Uncomment this if running in Google Colab and set USERNAME if running in docker container.\n", + "# !pip install -r https://raw.githubusercontent.com/intel/openfl/develop/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt\n", + "# import os\n", + "# os.environ[\"USERNAME\"] = \"colab\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e85e030", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "import torch.optim as optim\n", + "import torch\n", + "import torchvision\n", + "import numpy as np\n", + "\n", + "n_epochs = 3\n", + "batch_size_train = 64\n", + "batch_size_test = 1000\n", + "learning_rate = 0.01\n", + "momentum = 0.5\n", + "log_interval = 10\n", + "\n", + "random_seed = 1\n", + "torch.backends.cudnn.enabled = False\n", + "torch.manual_seed(random_seed)\n", + "\n", + "mnist_train = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=True,\n", + " download=True,\n", + " 
transform=torchvision.transforms.Compose(\n", + " [\n", + " torchvision.transforms.ToTensor(),\n", + " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "mnist_test = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=False,\n", + " download=True,\n", + " transform=torchvision.transforms.Compose(\n", + " [\n", + " torchvision.transforms.ToTensor(),\n", + " torchvision.transforms.Normalize((0.1307,), (0.3081,)),\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super(Net, self).__init__()\n", + " self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n", + " self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n", + " self.conv2_drop = nn.Dropout2d()\n", + " self.fc1 = nn.Linear(320, 50)\n", + " self.fc2 = nn.Linear(50, 10)\n", + "\n", + " def forward(self, x):\n", + " x = F.relu(F.max_pool2d(self.conv1(x), 2))\n", + " x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n", + " x = x.view(-1, 320)\n", + " x = F.relu(self.fc1(x))\n", + " x = F.dropout(x, training=self.training)\n", + " x = self.fc2(x)\n", + " return F.log_softmax(x)\n", + "\n", + "def inference(network,test_loader):\n", + " network.eval()\n", + " test_loss = 0\n", + " correct = 0\n", + " with torch.no_grad():\n", + " for data, target in test_loader:\n", + " output = network(data)\n", + " test_loss += F.nll_loss(output, target, size_average=False).item()\n", + " pred = output.data.max(1, keepdim=True)[1]\n", + " correct += pred.eq(target.data.view_as(pred)).sum()\n", + " test_loss /= len(test_loader.dataset)\n", + " print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n", + " test_loss, correct, len(test_loader.dataset),\n", + " 100. 
* correct / len(test_loader.dataset)))\n", + " accuracy = float(correct / len(test_loader.dataset))\n", + " return accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "precise-studio", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "from copy import deepcopy\n", + "\n", + "from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator\n", + "from openfl.experimental.workflow.runtime import LocalRuntime\n", + "from openfl.experimental.workflow.placement import aggregator, collaborator\n", + "\n", + "\n", + "def FedAvg(models, weights=None):\n", + " new_model = models[0]\n", + " state_dicts = [model.state_dict() for model in models]\n", + " state_dict = new_model.state_dict()\n", + " for key in models[1].state_dict():\n", + " state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts],\n", + " axis=0, \n", + " weights=weights))\n", + " new_model.load_state_dict(state_dict)\n", + " return new_model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "difficult-madrid", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "class FederatedFlow(FLSpec):\n", + "\n", + " def __init__(self, model=None, optimizer=None, rounds=3, **kwargs):\n", + " super().__init__(**kwargs)\n", + " if model is not None:\n", + " self.model = model\n", + " self.optimizer = optimizer\n", + " else:\n", + " self.model = Net()\n", + " self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate,\n", + " momentum=momentum)\n", + " self.rounds = rounds\n", + "\n", + " @aggregator\n", + " def start(self):\n", + " print(f'Performing initialization for model')\n", + " self.collaborators = self.runtime.collaborators\n", + " self.private = 10\n", + " self.current_round = 0\n", + " self.next(self.aggregated_model_validation, foreach='collaborators', exclude=['private'])\n", + "\n", + " @collaborator\n", + " def 
aggregated_model_validation(self):\n", + " print(f'Performing aggregated model validation for collaborator {self.input}')\n", + " self.agg_validation_score = inference(self.model, self.test_loader)\n", + " print(f'{self.input} value of {self.agg_validation_score}')\n", + " self.next(self.train)\n", + "\n", + " @collaborator\n", + " def train(self):\n", + " self.model.train()\n", + " self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate,\n", + " momentum=momentum)\n", + " train_losses = []\n", + " for batch_idx, (data, target) in enumerate(self.train_loader):\n", + " self.optimizer.zero_grad()\n", + " output = self.model(data)\n", + " loss = F.nll_loss(output, target)\n", + " loss.backward()\n", + " self.optimizer.step()\n", + " if batch_idx % log_interval == 0:\n", + " print('Train Epoch: 1 [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n", + " batch_idx * len(data), len(self.train_loader.dataset),\n", + " 100. * batch_idx / len(self.train_loader), loss.item()))\n", + " self.loss = loss.item()\n", + " torch.save(self.model.state_dict(), 'model.pth')\n", + " torch.save(self.optimizer.state_dict(), 'optimizer.pth')\n", + " self.training_completed = True\n", + " self.next(self.local_model_validation)\n", + "\n", + " @collaborator\n", + " def local_model_validation(self):\n", + " self.local_validation_score = inference(self.model, self.test_loader)\n", + " print(\n", + " f'Doing local model validation for collaborator {self.input}: {self.local_validation_score}')\n", + " self.next(self.join, exclude=['training_completed'])\n", + "\n", + " @aggregator\n", + " def join(self, inputs):\n", + " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", + " self.aggregated_model_accuracy = sum(\n", + " input.agg_validation_score for input in inputs) / len(inputs)\n", + " self.local_model_accuracy = sum(\n", + " input.local_validation_score for input in inputs) / len(inputs)\n", + " print(f'Average aggregated model validation values = 
{self.aggregated_model_accuracy}')\n", + " print(f'Average training loss = {self.average_loss}')\n", + " print(f'Average local model validation values = {self.local_model_accuracy}')\n", + " self.model = FedAvg([input.model for input in inputs])\n", + " self.optimizer = [input.optimizer for input in inputs][0]\n", + " self.current_round += 1\n", + " if self.current_round < self.rounds:\n", + " self.next(self.aggregated_model_validation,\n", + " foreach='collaborators', exclude=['private'])\n", + " else:\n", + " self.next(self.end)\n", + "\n", + " @aggregator\n", + " def end(self):\n", + " print(f'This is the end of the flow')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "forward-world", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "# Setup participants\n", + "aggregator = Aggregator()\n", + "aggregator.private_attributes = {}\n", + "\n", + "# Setup collaborators with private attributes\n", + "collaborator_names = ['Portland', 'Seattle', 'Chandler','Bangalore']\n", + "collaborators = [Collaborator(name=name) for name in collaborator_names]\n", + "for idx, collaborator in enumerate(collaborators):\n", + " local_train = deepcopy(mnist_train)\n", + " local_test = deepcopy(mnist_test)\n", + " local_train.data = mnist_train.data[idx::len(collaborators)]\n", + " local_train.targets = mnist_train.targets[idx::len(collaborators)]\n", + " local_test.data = mnist_test.data[idx::len(collaborators)]\n", + " local_test.targets = mnist_test.targets[idx::len(collaborators)]\n", + " collaborator.private_attributes = {\n", + " 'train_loader': torch.utils.data.DataLoader(local_train,batch_size=batch_size_train, shuffle=True),\n", + " 'test_loader': torch.utils.data.DataLoader(local_test,batch_size=batch_size_train, shuffle=True)\n", + " }\n", + "\n", + "local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend='single_process')\n", + "print(f'Local runtime collaborators = 
{local_runtime.collaborators}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a175b4d6", + "metadata": {}, + "outputs": [], + "source": [ + "#| export\n", + "\n", + "model = None\n", + "best_model = None\n", + "optimizer = None\n", + "flflow = FederatedFlow(model, optimizer, rounds=2, checkpoint=True)\n", + "flflow.runtime = local_runtime" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "dir_workspace_3.10", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/.workspace rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/cols.yaml rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/cols.yaml diff --git a/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml new file mode 100644 index 
0000000000..8e36aa2703 --- /dev/null +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/data.yaml @@ -0,0 +1,8 @@ +Bangalore: + private_attributes: src.experiment.Bangalore_private_attributes +Chandler: + private_attributes: src.experiment.Chandler_private_attributes +Portland: + private_attributes: src.experiment.Portland_private_attributes +Seattle: + private_attributes: src.experiment.Seattle_private_attributes diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/plan/defaults rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml similarity index 55% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml index c9bea91dfa..92b1c686c6 100644 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/plan.yaml @@ -10,11 +10,9 @@ collaborator: federated_flow: settings: checkpoint: true - model: src.experiment.model - optimizer: src.experiment.optimizer - round_number: 0 - watermark_pretrain_optimizer: src.experiment.watermark_pretrain_optimizer - watermark_retrain_optimizer: src.experiment.watermark_retrain_optimizer - template: 
src.experiment.FederatedFlow_MNIST_Watermarking + model: null + optimizer: null + rounds: 2 + template: src.experiment.FederatedFlow network: defaults: plan/defaults/network.yaml diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt similarity index 53% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt index 8946ff2cac..7486b2d399 100644 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/requirements.txt @@ -1,6 +1,4 @@ wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability torch torchvision -matplotlib -git+https://github.com/pyviz-topics/imagen.git@master -holoviews==1.15.4 +ipywidgets diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/actual/src/__init__.py rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py diff --git a/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py new file mode 100644 index 0000000000..782ece855f --- /dev/null +++ 
b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/experiment.py @@ -0,0 +1,228 @@ +# AUTOGENERATED! DO NOT EDIT! File to edit: ../../../101_MNIST.ipynb. + +# %% auto 0 +__all__ = ['n_epochs', 'batch_size_train', 'batch_size_test', 'learning_rate', 'momentum', 'log_interval', 'random_seed', + 'mnist_train', 'mnist_test', 'aggregator', 'collaborator_names', 'collaborators', 'local_runtime', 'model', + 'best_model', 'optimizer', 'flflow', 'Net', 'inference', 'FedAvg', 'FederatedFlow'] + +# %% ../../../101_MNIST.ipynb 8 + +# Uncomment this if running in Google Colab and set USERNAME if running in docker container. +# import os +# os.environ["USERNAME"] = "colab" + +# %% ../../../101_MNIST.ipynb 10 +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch +import torchvision +import numpy as np + +n_epochs = 3 +batch_size_train = 64 +batch_size_test = 1000 +learning_rate = 0.01 +momentum = 0.5 +log_interval = 10 + +random_seed = 1 +torch.backends.cudnn.enabled = False +torch.manual_seed(random_seed) + +mnist_train = torchvision.datasets.MNIST( + "./files/", + train=True, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) + +mnist_test = torchvision.datasets.MNIST( + "./files/", + train=False, + download=True, + transform=torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.1307,), (0.3081,)), + ] + ), +) + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 10, kernel_size=5) + self.conv2 = nn.Conv2d(10, 20, kernel_size=5) + self.conv2_drop = nn.Dropout2d() + self.fc1 = nn.Linear(320, 50) + self.fc2 = nn.Linear(50, 10) + + def forward(self, x): + x = F.relu(F.max_pool2d(self.conv1(x), 2)) + x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) + x = 
x.view(-1, 320) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = self.fc2(x) + return F.log_softmax(x) + +def inference(network,test_loader): + network.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for data, target in test_loader: + output = network(data) + test_loss += F.nll_loss(output, target, size_average=False).item() + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).sum() + test_loss /= len(test_loader.dataset) + print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( + test_loss, correct, len(test_loader.dataset), + 100. * correct / len(test_loader.dataset))) + accuracy = float(correct / len(test_loader.dataset)) + return accuracy + +# %% ../../../101_MNIST.ipynb 12 +from copy import deepcopy + +from openfl.experimental.workflow.interface import FLSpec, Aggregator, Collaborator +from openfl.experimental.workflow.runtime import LocalRuntime +from openfl.experimental.workflow.placement import aggregator, collaborator + + +def FedAvg(models, weights=None): + new_model = models[0] + state_dicts = [model.state_dict() for model in models] + state_dict = new_model.state_dict() + for key in models[1].state_dict(): + state_dict[key] = torch.from_numpy(np.average([state[key].numpy() for state in state_dicts], + axis=0, + weights=weights)) + new_model.load_state_dict(state_dict) + return new_model + +# %% ../../../101_MNIST.ipynb 14 +class FederatedFlow(FLSpec): + + def __init__(self, model=None, optimizer=None, rounds=3, **kwargs): + super().__init__(**kwargs) + if model is not None: + self.model = model + self.optimizer = optimizer + else: + self.model = Net() + self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, + momentum=momentum) + self.rounds = rounds + + @aggregator + def start(self): + print(f'Performing initialization for model') + self.collaborators = self.runtime.collaborators + self.private = 10 + self.current_round = 0 + 
self.next(self.aggregated_model_validation, foreach='collaborators', exclude=['private']) + + @collaborator + def aggregated_model_validation(self): + print(f'Performing aggregated model validation for collaborator {self.input}') + self.agg_validation_score = inference(self.model, self.test_loader) + print(f'{self.input} value of {self.agg_validation_score}') + self.next(self.train) + + @collaborator + def train(self): + self.model.train() + self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, + momentum=momentum) + train_losses = [] + for batch_idx, (data, target) in enumerate(self.train_loader): + self.optimizer.zero_grad() + output = self.model(data) + loss = F.nll_loss(output, target) + loss.backward() + self.optimizer.step() + if batch_idx % log_interval == 0: + print('Train Epoch: 1 [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + batch_idx * len(data), len(self.train_loader.dataset), + 100. * batch_idx / len(self.train_loader), loss.item())) + self.loss = loss.item() + torch.save(self.model.state_dict(), 'model.pth') + torch.save(self.optimizer.state_dict(), 'optimizer.pth') + self.training_completed = True + self.next(self.local_model_validation) + + @collaborator + def local_model_validation(self): + self.local_validation_score = inference(self.model, self.test_loader) + print( + f'Doing local model validation for collaborator {self.input}: {self.local_validation_score}') + self.next(self.join, exclude=['training_completed']) + + @aggregator + def join(self, inputs): + self.average_loss = sum(input.loss for input in inputs) / len(inputs) + self.aggregated_model_accuracy = sum( + input.agg_validation_score for input in inputs) / len(inputs) + self.local_model_accuracy = sum( + input.local_validation_score for input in inputs) / len(inputs) + print(f'Average aggregated model validation values = {self.aggregated_model_accuracy}') + print(f'Average training loss = {self.average_loss}') + print(f'Average local model validation values = 
{self.local_model_accuracy}') + self.model = FedAvg([input.model for input in inputs]) + self.optimizer = [input.optimizer for input in inputs][0] + self.current_round += 1 + if self.current_round < self.rounds: + self.next(self.aggregated_model_validation, + foreach='collaborators', exclude=['private']) + else: + self.next(self.end) + + @aggregator + def end(self): + print(f'This is the end of the flow') + +# %% ../../../101_MNIST.ipynb 16 +# Setup participants +aggregator = Aggregator() +aggregator.private_attributes = {} + +# Setup collaborators with private attributes +collaborator_names = ['Portland', 'Seattle', 'Chandler','Bangalore'] +collaborators = [Collaborator(name=name) for name in collaborator_names] +for idx, collaborator in enumerate(collaborators): + local_train = deepcopy(mnist_train) + local_test = deepcopy(mnist_test) + local_train.data = mnist_train.data[idx::len(collaborators)] + local_train.targets = mnist_train.targets[idx::len(collaborators)] + local_test.data = mnist_test.data[idx::len(collaborators)] + local_test.targets = mnist_test.targets[idx::len(collaborators)] + collaborator.private_attributes = { + 'train_loader': torch.utils.data.DataLoader(local_train,batch_size=batch_size_train, shuffle=True), + 'test_loader': torch.utils.data.DataLoader(local_test,batch_size=batch_size_train, shuffle=True) + } + +local_runtime = LocalRuntime(aggregator=aggregator, collaborators=collaborators, backend='single_process') +print(f'Local runtime collaborators = {local_runtime.collaborators}') + +# %% ../../../101_MNIST.ipynb 18 +model = None +best_model = None +optimizer = None +flflow = FederatedFlow(model, optimizer, rounds=2, checkpoint=True) +flflow.runtime = local_runtime + +runtime_local = flflow._runtime + +runtime_collaborators = runtime_local._LocalRuntime__collaborators +Portland_private_attributes = runtime_collaborators['Portland'].private_attributes +Seattle_private_attributes = runtime_collaborators['Seattle'].private_attributes 
+Chandler_private_attributes = runtime_collaborators['Chandler'].private_attributes +Bangalore_private_attributes = runtime_collaborators['Bangalore'].private_attributes \ No newline at end of file diff --git a/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_script.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_script.py new file mode 100644 index 0000000000..d523b68241 --- /dev/null +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export/test_script.py @@ -0,0 +1,112 @@ +# Copyright (C) 2020-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import shutil +import filecmp +from pathlib import Path +import pytest +from openfl.experimental.workflow.notebooktools import NotebookTools + +class bcolors: + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKCYAN = "\033[96m" + OKGREEN = "\033[92m" + WARNING = "\033[93m" + FAIL = "\033[91m" + ENDC = "\033[0m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" + +# Define paths +NOTEBOOK_PATH = "./test_101_MNIST.ipynb" +ACTUAL_DIR = "test_artifacts/actual" +EXPECTED_DIR = "test_artifacts/expected" + +@pytest.fixture +def setup_workspace(): + """Setup function to create the actual workspace for testing.""" + + print(f"{bcolors.OKBLUE}Setting up the workspace.{bcolors.ENDC}") + # Ensure the actual directory is empty + if Path(ACTUAL_DIR).exists(): + shutil.rmtree(ACTUAL_DIR) + Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) + print(f"{bcolors.OKGREEN}Workspace setup complete.{bcolors.ENDC}") + +def validate_generated_workspace(): + """Validate that the generated workspace matches the expected artifacts.""" + print(f"{bcolors.OKBLUE}Validating the generated workspace.{bcolors.ENDC}") + + # Compare the expected and actual directories + if not compare_directories(EXPECTED_DIR, ACTUAL_DIR): + print(f"{bcolors.FAIL}❌ Test failed - The workspace generated by NotebookTools export does not match the expected.{bcolors.ENDC}") + assert False, "The workspace generated 
by NotebookTools export functionality does not match the expected." + else: + print(f"{bcolors.OKGREEN}✔️ Test passed - The generated workspace matches the expected artifacts.{bcolors.ENDC}") + +def compare_files(file1, file2): + """Compare the content of two files, ignoring comment lines.""" + with open(file1, "r") as f1, open(file2, "r") as f2: + lines1 = f1.readlines() + lines2 = f2.readlines() + + # Remove comment lines (lines starting with '#') + lines1 = [line for line in lines1 if not line.startswith("#")] + lines2 = [line for line in lines2 if not line.startswith("#")] + + if lines1 == lines2: + print(f"{bcolors.OKGREEN}✅ Successfully compared: {file1} and {file2}{bcolors.ENDC}") + return True + else: + print(f"{bcolors.FAIL}Comparison failed: {file1} and {file2}{bcolors.ENDC}") + print(f"{bcolors.FAIL}Differences:{bcolors.ENDC}") + for line1, line2 in zip(lines1, lines2): + if line1 != line2: + print(f"{bcolors.FAIL}Expected: {line1.strip()}{bcolors.ENDC}") + print(f"{bcolors.FAIL}Actual: {line2.strip()}{bcolors.ENDC}") + return False + +def compare_directories(dir1, dir2): + """Compare two directories recursively, including file content.""" + comparison = filecmp.dircmp(dir1, dir2) + + # Check for differences in file names or structure + if comparison.left_only or comparison.right_only: + print(f"{bcolors.FAIL}Differences found in directory structure: {comparison.left_only} only in {dir1}, {comparison.right_only} only in {dir2}{bcolors.ENDC}") + return False + + # Compare subdirectories, excluding __pycache__ + for subdir in comparison.common_dirs: + if subdir == "__pycache__": + continue + if not compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): + return False + + # Compare file content for all common files + for file in comparison.common_files: + file1 = Path(dir1) / file + file2 = Path(dir2) / file + print(f"{bcolors.OKCYAN} Comparing files of expected and generated workspace.{bcolors.ENDC}") + if not compare_files(file1, file2): + return 
False + + return True + +def test_export_functionality(setup_workspace): + """ + Test the workspace generated by NotebookTools export functionality matches the Expected Artifacts. + This function compares the contents of the actual directory generated by + NotebookTools with the expected directory. + """ + # NotebookTools export generate the workspace. + print(f"{bcolors.OKBLUE}Calling ... NotebookTools export functionality to generate the actual workspace.{bcolors.ENDC}") + + NotebookTools.export( + notebook_path=NOTEBOOK_PATH, + output_workspace=ACTUAL_DIR + ) + print(f"{bcolors.OKGREEN}NotebookTools execution complete.{bcolors.ENDC}") + + # Validate that the generated workspace matches the expected output + validate_generated_workspace() diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_MNIST_Watermarking.ipynb similarity index 86% rename from tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_MNIST_Watermarking.ipynb index 0ee4c67681..459fc646d9 100644 --- a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/MNIST_Watermarking.ipynb +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_MNIST_Watermarking.ipynb @@ -6,15 +6,7 @@ "id": "dc13070c", "metadata": {}, "source": [ - "# Federated Runtime: 301_MNIST_Watermarking" - ] - }, - { - "cell_type": "markdown", - "id": "3b7357ef", - "metadata": {}, - "source": [ - "This tutorial is based on the LocalRuntime example [301_MNIST_Watermarking](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/workflow/301_MNIST_Watermarking.ipynb). It has been adapted to demonstrate the FederatedRuntime version of the watermarking workflow. 
In this tutorial, we will guide you through the process of deploying the watermarking example within a federation, showcasing how to transition from a local setup to a federated environment effectively." + "# MNIST_Watermarking Reference for Testing" ] }, { @@ -55,18 +47,9 @@ "Once we have specified the name of the module, subsequent cells of the notebook need to be *appended* by the `#| export` directive as shown below. User should ensure that *all* the notebook functionality required in the Federated Learning experiment is included in this directive" ] }, - { - "cell_type": "markdown", - "id": "2e19dcf2", - "metadata": {}, - "source": [ - "We start by installing OpenFL and dependencies of the workflow interface \n", - "> These dependencies are required to be exported and become the requirements for the Federated Learning Workspace " - ] - }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "f7475cba", "metadata": {}, "outputs": [], @@ -74,7 +57,6 @@ "#| export\n", "\n", "!pip install git+https://github.com/securefederatedai/openfl.git\n", - "!pip install -r ../../../workflow_interface_requirements.txt\n", "!pip install matplotlib\n", "!pip install torch==2.3.1\n", "!pip install torchvision==0.18.1\n", @@ -83,16 +65,6 @@ "!pip install -U ipywidgets" ] }, - { - "cell_type": "markdown", - "id": "9a6ae8e2", - "metadata": {}, - "source": [ - "We now define our model, optimizer, and some helper functions like we would for any other deep learning experiment \n", - "\n", - "> This cell and all the subsequent cells are important ingredients of the Federated Learning experiment and therefore annotated with the `#| export` directive" - ] - }, { "cell_type": "code", "execution_count": 3, @@ -183,14 +155,6 @@ " return train_loss" ] }, - { - "cell_type": "markdown", - "id": "d0849d57", - "metadata": {}, - "source": [ - "Next we import the `FLSpec` & placement decorators (`aggregator/collaborator`)" - ] - }, { "cell_type": "code", 
"execution_count": 4, @@ -215,14 +179,6 @@ " return agg_model" ] }, - { - "cell_type": "markdown", - "id": "36ed5e31", - "metadata": {}, - "source": [ - "Let us now define the Workflow for Watermark embedding." - ] - }, { "cell_type": "code", "execution_count": null, @@ -448,18 +404,6 @@ " print(\"This is the end of the flow\")" ] }, - { - "cell_type": "markdown", - "id": "b5371b6d", - "metadata": {}, - "source": [ - "## Defining and Initializing the Federated Runtime\n", - "We initialize the Federated Runtime by providing:\n", - "- `director_info`: The director's connection information \n", - "- `authorized_collaborators`: A list of authorized collaborators\n", - "- `notebook_path`: Path to this Jupyter notebook." - ] - }, { "cell_type": "code", "execution_count": 6, @@ -485,32 +429,6 @@ ")" ] }, - { - "cell_type": "markdown", - "id": "6de9684f", - "metadata": {}, - "source": [ - "The status of the connected Envoys can be checked using the `get_envoys()` method of the `federated_runtime`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1f1be87f", - "metadata": {}, - "outputs": [], - "source": [ - "federated_runtime.get_envoys()" - ] - }, - { - "cell_type": "markdown", - "id": "0eaeca25", - "metadata": {}, - "source": [ - "With the federated_runtime now instantiated, we will proceed to deploy the watermarking workspace and run the experiment!" 
- ] - }, { "cell_type": "code", "execution_count": null, diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/.workspace rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/.workspace diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/plan/defaults rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/defaults diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/plan/plan.yaml rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/plan/plan.yaml diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/actual/requirements.txt rename to 
tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/requirements.txt diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export/test_artifacts/expected/src/__init__.py rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/__init__.py diff --git a/tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py similarity index 100% rename from tests/github/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py rename to tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_artifacts/expected/src/experiment.py diff --git a/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py new file mode 100644 index 0000000000..57ce547bbd --- /dev/null +++ b/tests/openfl/experimental/workflow/NotebookTools/testcase_export_federated/test_script.py @@ -0,0 +1,134 @@ +# Copyright (C) 2020-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import shutil +import filecmp +from pathlib import Path +import pytest +from openfl.experimental.workflow.runtime import FederatedRuntime +from openfl.experimental.workflow.notebooktools import NotebookTools + +class bcolors: + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKCYAN = "\033[96m" + OKGREEN = "\033[92m" + WARNING = "\033[93m" + FAIL = "\033[91m" + ENDC = "\033[0m" + BOLD = "\033[1m" + 
UNDERLINE = "\033[4m" + + +# Define paths +NOTEBOOK_PATH = "./test_MNIST_Watermarking.ipynb" +ACTUAL_DIR = "test_artifacts/actual" +EXPECTED_DIR = "test_artifacts/expected" + +# Setup for FederatedRuntime +director_info = { + 'director_node_fqdn': 'localhost', + 'director_port': 50050, +} + +authorized_collaborators = ['Bangalore', 'Chandler'] + +# Creating an instance of FederatedRuntime +federated_runtime = FederatedRuntime( + collaborators=authorized_collaborators, + director=director_info, + notebook_path=NOTEBOOK_PATH, + tls=False +) + +@pytest.fixture() +def setup_workspace(): + """Setup function to create the actual workspace for testing.""" + + print(f"{bcolors.OKBLUE}Setting up the workspace.{bcolors.ENDC}") + # Ensure the actual directory is empty + if Path(ACTUAL_DIR).exists(): + shutil.rmtree(ACTUAL_DIR) + Path(ACTUAL_DIR).mkdir(parents=True, exist_ok=True) + +def validate_generated_workspace(): + """Validate that the generated workspace matches the expected artifacts.""" + print(f"{bcolors.OKBLUE}Validating the generated workspace.{bcolors.ENDC}") + + # Compare the expected and actual directories + if not compare_directories(EXPECTED_DIR, ACTUAL_DIR): + print(f"{bcolors.FAIL}❌ Test failed - The workspace generated by NotebookTools export federated function does not match the expected.{bcolors.ENDC}") + assert False, "The workspace generated by NotebookTools export federated function does not match the expected."
+ else: + print(f"{bcolors.OKGREEN}✔️ Test passed - The generated workspace matches the expected artifacts.{bcolors.ENDC}") + +def compare_files(file1, file2): + """Compare the content of two files, ignoring comment lines.""" + with open(file1, "r") as f1, open(file2, "r") as f2: + lines1 = f1.readlines() + lines2 = f2.readlines() + + # Remove comment lines (lines starting with '#') + lines1 = [line for line in lines1 if not line.startswith("#")] + lines2 = [line for line in lines2 if not line.startswith("#")] + + if lines1 == lines2: + print(f"{bcolors.OKGREEN}✅ Successfully compared: {file1} and {file2}{bcolors.ENDC}") + return True + else: + print(f"{bcolors.FAIL}Comparison failed: {file1} and {file2}{bcolors.ENDC}") + print(f"{bcolors.FAIL}Differences:{bcolors.ENDC}") + for line1, line2 in zip(lines1, lines2): + if line1 != line2: + print(f"{bcolors.FAIL}Expected: {line1.strip()}{bcolors.ENDC}") + print(f"{bcolors.FAIL}Actual: {line2.strip()}{bcolors.ENDC}") + return False + +def compare_directories(dir1, dir2): + """Compare two directories recursively, including file content.""" + comparison = filecmp.dircmp(dir1, dir2) + + # Check for differences in file names or structure + if comparison.left_only or comparison.right_only: + print(f"{bcolors.FAIL}Differences found in directory structure: {comparison.left_only} only in {dir1}, {comparison.right_only} only in {dir2}{bcolors.ENDC}") + return False + + # Compare subdirectories, excluding __pycache__ + for subdir in comparison.common_dirs: + if subdir == "__pycache__": + continue + if not compare_directories(Path(dir1) / subdir, Path(dir2) / subdir): + return False + + # Compare file content for all common files + for file in comparison.common_files: + file1 = Path(dir1) / file + file2 = Path(dir2) / file + print(f"{bcolors.OKCYAN} Comparing files of expected and generated workspace.{bcolors.ENDC}") + if not compare_files(file1, file2): + return False + + return True + +def 
test_export_federated_functionality(setup_workspace): + """ + Test the workspace generated by NotebookTools export federated functionality matches the Expected Artifacts. + This function compares the contents of the actual directory generated by + NotebookTools with the expected directory. + """ + # Use the FederatedRuntime instance to get the parameters + director_fqdn = federated_runtime.director["director_node_fqdn"] + tls = federated_runtime.tls + + print(f"{bcolors.OKBLUE}Calling ... NotebookTools export_federated functionality to generate the actual workspace.{bcolors.ENDC}") + # Generate workspace using NotebookTools + NotebookTools.export_federated( + notebook_path=NOTEBOOK_PATH, + output_workspace=ACTUAL_DIR, + director_fqdn=director_fqdn, + tls=tls + ) + print(f"{bcolors.OKGREEN}NotebookTools execution complete.{bcolors.ENDC}") + + # Validate that the generated workspace matches the expected output + validate_generated_workspace()