From 279c5a9e6e48778fe2913ba708b53ab9c2b0520c Mon Sep 17 00:00:00 2001 From: zhangqi3 Date: Fri, 14 Jan 2022 19:19:30 +0800 Subject: [PATCH] [Release] v0.0.5 --- application/imagenet_example/main.py | 1 - docker/Dockerfile | 203 ++++++++++++++++++ docker/prepare.sh | 18 ++ docs/README.md | 5 + docs/source/api_reference/index.rst | 7 + .../api_reference/mqbench.fake_quantize.rst | 69 ++++++ .../mqbench.nn.intrinsic.modules.rst | 21 ++ .../mqbench.nn.intrinsic.qat.modules.rst | 21 ++ .../mqbench.nn.intrinsic.qat.rst | 18 ++ .../api_reference/mqbench.nn.intrinsic.rst | 19 ++ docs/source/api_reference/mqbench.nn.rst | 18 ++ docs/source/api_reference/mqbench.rst | 95 ++++++++ docs/source/api_reference/mqbench.utils.rst | 45 ++++ .../ImageClassification/Benchmark.rst | 43 ++++ .../NaturalLanguageProcessing/Benchmark.rst | 4 + .../benchmark/ObjectDetection/Benchmark.rst | 4 + docs/source/benchmark/index.rst | 10 + .../developer_guide/algorithm/add_ptq.rst | 67 ++++++ .../developer_guide/algorithm/add_qat.rst | 34 +++ .../developer_guide/backend/add_backend.rst | 86 ++++++++ .../backend/add_backends_in_3_steps.rst | 202 +++++++++++++++++ docs/source/developer_guide/backend/backq.png | Bin 0 -> 41459 bytes docs/source/developer_guide/backend/fakeq.png | Bin 0 -> 36306 bytes docs/source/developer_guide/backend/onnx.png | Bin 0 -> 21886 bytes docs/source/developer_guide/index.rst | 14 ++ .../developer_guide/pattern/add_pattern.rst | 80 +++++++ docs/source/get_started/index.rst | 12 ++ .../get_started/quick_start_academic.rst | 76 +++++++ .../source/get_started/quick_start_deploy.rst | 57 +++++ docs/source/get_started/setup.rst | 13 ++ docs/source/get_started/support_matrix.rst | 24 +++ docs/source/index.rst | 11 +- docs/source/user_guide/PTQ/adaround.rst | 65 ++++++ docs/source/user_guide/PTQ/naive.rst | 33 +++ docs/source/user_guide/QAT/naive.rst | 44 ++++ docs/source/user_guide/algorithm/index.rst | 107 +++++++++ docs/source/user_guide/deploy/snpe.rst | 58 +++++ docs/source/user_guide/deploy/tensorrt.rst | 56 +++++ docs/source/user_guide/hardware/index.rst | 9 + docs/source/user_guide/hardware/nnie.rst | 88 ++++++++ docs/source/user_guide/hardware/snpe.rst | 29 +++ docs/source/user_guide/hardware/tensorrt.rst | 23 ++ docs/source/user_guide/howtodeploy.rst | 10 + docs/source/user_guide/howtoptq.rst | 8 + docs/source/user_guide/howtoqat.rst | 7 + docs/source/user_guide/index.rst | 11 + .../user_guide/internal/learn_config.rst | 66 ++++++ mqbench/adaround.py | 25 ++- mqbench/convert_deploy.py | 25 ++- mqbench/custom_quantizer.py | 17 +- mqbench/deploy/deploy_linear.py | 13 +- mqbench/fake_quantize/adaround_quantizer.py | 3 - mqbench/fake_quantize/quantize_base.py | 2 +- mqbench/fuser_method_mappings.py | 3 +- .../nn/intrinsic/qat/modules/deconv_fused.py | 2 +- .../nn/intrinsic/qat/modules/linear_fused.py | 1 + mqbench/observer.py | 177 ++++++++------- mqbench/prepare_by_platform.py | 56 ++++- mqbench/quantization/__init__.py | 0 mqbench/tools/__init__.py | 0 mqbench/utils/state.py | 23 ++ 61 files changed, 2106 insertions(+), 132 deletions(-) create mode 100644 docker/Dockerfile create mode 100644 docker/prepare.sh create mode 100644 docs/README.md create mode 100644 docs/source/api_reference/index.rst create mode 100644 docs/source/api_reference/mqbench.fake_quantize.rst create mode 100644 docs/source/api_reference/mqbench.nn.intrinsic.modules.rst create mode 100644 docs/source/api_reference/mqbench.nn.intrinsic.qat.modules.rst create mode 100644 docs/source/api_reference/mqbench.nn.intrinsic.qat.rst 
create mode 100644 docs/source/api_reference/mqbench.nn.intrinsic.rst create mode 100644 docs/source/api_reference/mqbench.nn.rst create mode 100644 docs/source/api_reference/mqbench.rst create mode 100644 docs/source/api_reference/mqbench.utils.rst create mode 100644 docs/source/benchmark/ImageClassification/Benchmark.rst create mode 100644 docs/source/benchmark/NaturalLanguageProcessing/Benchmark.rst create mode 100644 docs/source/benchmark/ObjectDetection/Benchmark.rst create mode 100644 docs/source/benchmark/index.rst create mode 100644 docs/source/developer_guide/algorithm/add_ptq.rst create mode 100644 docs/source/developer_guide/algorithm/add_qat.rst create mode 100644 docs/source/developer_guide/backend/add_backend.rst create mode 100644 docs/source/developer_guide/backend/add_backends_in_3_steps.rst create mode 100644 docs/source/developer_guide/backend/backq.png create mode 100644 docs/source/developer_guide/backend/fakeq.png create mode 100644 docs/source/developer_guide/backend/onnx.png create mode 100644 docs/source/developer_guide/index.rst create mode 100644 docs/source/developer_guide/pattern/add_pattern.rst create mode 100644 docs/source/get_started/index.rst create mode 100644 docs/source/get_started/quick_start_academic.rst create mode 100644 docs/source/get_started/quick_start_deploy.rst create mode 100644 docs/source/get_started/setup.rst create mode 100644 docs/source/get_started/support_matrix.rst create mode 100644 docs/source/user_guide/PTQ/adaround.rst create mode 100644 docs/source/user_guide/PTQ/naive.rst create mode 100644 docs/source/user_guide/QAT/naive.rst create mode 100644 docs/source/user_guide/algorithm/index.rst create mode 100644 docs/source/user_guide/deploy/snpe.rst create mode 100644 docs/source/user_guide/deploy/tensorrt.rst create mode 100644 docs/source/user_guide/hardware/index.rst create mode 100644 docs/source/user_guide/hardware/nnie.rst create mode 100644 docs/source/user_guide/hardware/snpe.rst create mode 100644 docs/source/user_guide/hardware/tensorrt.rst create mode 100644 docs/source/user_guide/howtodeploy.rst create mode 100644 docs/source/user_guide/howtoptq.rst create mode 100644 docs/source/user_guide/howtoqat.rst create mode 100644 docs/source/user_guide/index.rst create mode 100644 docs/source/user_guide/internal/learn_config.rst create mode 100644 mqbench/quantization/__init__.py create mode 100644 mqbench/tools/__init__.py diff --git a/application/imagenet_example/main.py b/application/imagenet_example/main.py index f7cc93a..1eff913 100644 --- a/application/imagenet_example/main.py +++ b/application/imagenet_example/main.py @@ -162,7 +162,6 @@ def main_worker(gpu, ngpus_per_node, args): # quantize model if args.quant: model = prepare_by_platform(model, args.backend) - if not torch.cuda.is_available(): print('using CPU, this will be slow') elif args.distributed: diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..c06be93 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,203 @@ +FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04 + +# Due to bad internet issue +RUN rm /etc/apt/sources.list.d/* + +# prepare build dir tmp +RUN chmod 1777 /tmp \ + && mkdir /scratch \ + && chmod 1777 /scratch + +# install build needed tools +RUN apt-get update -y \ + && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata \ + && apt-get install -y --no-install-recommends \ + software-properties-common \ + apt-transport-https \ + autoconf \ + automake \ + bc \ + build-essential \ + bzip2 \ + ca-certificates \ 
+    curl \
+    g++ \
+    gdb \
+    git \
+    gnupg \
+    locales \
+    libboost-all-dev \
+    libgflags-dev \
+    libgoogle-glog-dev \
+    libgtest-dev \
+    libjson-c-dev \
+    libjsoncpp-dev \
+    libssl-dev \
+    libtool \
+    libunwind-dev \
+    make \
+    openssh-client \
+    openssl \
+    # python3 \
+    python3-dev \
+    # python3-minimal \
+    # python3-numpy \
+    # python3-opencv \
+    python3-pip \
+    # python3-setuptools \
+    # python3-venv \
+    software-properties-common \
+    sudo \
+    tree \
+    unzip \
+    vim \
+    wget \
+    yasm \
+    zstd
+
+# set python
+ENV PYTHONPATH=/opt/python:$PYTHONPATH
+ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+ENV PYENV_ROOT=/opt/pyenv \
+    PATH=/opt/pyenv/shims:/opt/pyenv/bin:$PATH
+
+RUN git clone --depth 1 https://github.com/pyenv/pyenv.git /opt/pyenv \
+    && eval "$(pyenv init -)" \
+    && env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install 3.6.4 \
+    && pyenv global 3.6.4
+
+RUN pip install --upgrade pip
+RUN pip install torch==1.8.1 torchvision flake8 scipy pytest
+
+
+# RUN mkdir /opt/hub && wget https://github.com/pytorch/vision/archive/master.zip -P /opt/hub && unzip /opt/hub/master.zip -d /opt/hub && mv /opt/hub/vision-master/ /opt/hub/pytorch_vision_master
+ENV TORCH_HOME=/opt/
+
+RUN apt-get install -y \
+    libavcodec-dev \
+    libavformat-dev \
+    libeigen3-dev \
+    libgstreamer-plugins-base1.0-dev \
+    libgstreamer1.0-dev \
+    libgtest-dev \
+    libgtk-3-dev \
+    libgtk2.0-dev \
+    libhdf5-dev \
+    libjpeg-dev \
+    libopenexr-dev \
+    libpng-dev \
+    libswscale-dev \
+    libtiff-dev \
+    libwebp-dev \
+    # opencl-clhpp-headers \
+    # opencl-headers \
+    # pocl-opencl-icd \
+    rpm \
+    && add-apt-repository -y ppa:ubuntu-toolchain-r/test \
+    && apt-get install -y \
+    gcc-8 \
+    g++-8 \
+    gcc-9 \
+    g++-9 \
+    && wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null \
+    && apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main' \
+    && apt-get update -y \
+    && apt-get install -y \
+    cmake=3.16.0-0kitware1 \
+    cmake-data=3.16.0-0kitware1 \
+    kitware-archive-keyring \
+    && apt-get install -y ffmpeg
+
+# set locales
+RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen \
+    && echo "LC_ALL=en_US.UTF-8" >> /etc/environment \
+    && echo "LANG=en_US.UTF-8" > /etc/locale.conf \
+    && locale-gen en_US.UTF-8 \
+    && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \
+    && apt-get install -y dialog \
+    && dpkg-reconfigure --frontend noninteractive locales
+
+
+# build gtest
+RUN cd /usr/src/gtest \
+    && mkdir -p build \
+    && cd build \
+    && cmake .. \
+    && make \
+    && make install
+
+# build glog
+RUN cd /tmp \
+    && wget --progress=dot:mega -O glog.0.4.0.tar.gz https://codeload.github.com/google/glog/tar.gz/v0.4.0 \
+    && tar -xvf glog.0.4.0.tar.gz \
+    && cd glog-0.4.0 \
+    && ./autogen.sh \
+    && mkdir build \
+    && cd build \
+    && cmake -DBUILD_SHARED_LIBS=ON .. \
+    && make -j 4 \
+    && make install \
+    && rm -fr /tmp/*
+
+
+# build protobuf
+RUN cd /tmp; wget --progress=dot:mega https://codeload.github.com/google/protobuf/zip/v3.4.0 \
+    && unzip v3.4.0 \
+    && cd protobuf-3.4.0 \
+    && ./autogen.sh \
+    && ./configure \
+    && make -j 4 \
+    && make install \
+    && ldconfig \
+    && rm -fr /tmp/*
+
+# build gflags
+RUN cd /tmp; wget --progress=dot:mega https://github.com/gflags/gflags/archive/v2.2.2.tar.gz \
+    && tar xvf v2.2.2.tar.gz \
+    && cd gflags-2.2.2 \
+    && mkdir build \
+    && cd build \
+    && cmake -DBUILD_SHARED_LIBS=ON .. \
+    && make -j 4 \
+    && make install \
+    && rm -fr /tmp/*
+
+# build pybind11
+RUN cd /tmp; git clone https://github.com/pybind/pybind11.git \
+    && cd pybind11 \
+    && git checkout v2.5.0 \
+    && mkdir build \
+    && cd build \
+    && cmake -DPYBIND11_TEST=OFF .. \
+    && make \
+    && make install \
+    && rm -fr /tmp/* \
+    && chmod 777 /usr/lib/python3/dist-packages
+
+# install xir packages
+RUN cd /tmp \
+    && wget -O libunilog.deb https://www.xilinx.com/bin/public/openDownload?filename=libunilog_1.4.1-r82_amd64.deb \
+    && wget -O libtarget-factory.deb https://www.xilinx.com/bin/public/openDownload?filename=libtarget-factory_1.4.1-r85_amd64.deb \
+    && wget -O libxir.deb https://www.xilinx.com/bin/public/openDownload?filename=libxir_1.4.1-r91_amd64.deb \
+    && wget -O libvart.deb https://www.xilinx.com/bin/public/openDownload?filename=libvart_1.4.1-r130_amd64.deb \
+    && wget -O libvitis_ai_library.deb https://www.xilinx.com/bin/public/openDownload?filename=libvitis_ai_library_1.4.1-r114_amd64.deb \
+    && wget -O librt-engine.deb https://www.xilinx.com/bin/public/openDownload?filename=librt-engine_1.4.1-r195_amd64.deb \
+    && wget -O aks.deb https://www.xilinx.com/bin/public/openDownload?filename=aks_1.4.1-r78_amd64.deb \
+    && apt-get install -y --no-install-recommends /tmp/*.deb \
+    && rm -rf /tmp/* \
+    && ldconfig
+
+# install mqbench requirements
+RUN pip install urllib3 onnx
+
+# set xir path
+ENV PYTHONPATH=/usr/lib/python3/dist-packages/:$PYTHONPATH
+
+# install mqbench
+RUN cd /root/ \
+    && git clone https://github.com/ModelTC/MQBench.git \
+    && cd MQBench \
+    && python setup.py develop
+
diff --git a/docker/prepare.sh b/docker/prepare.sh
new file mode 100644
index 0000000..67242a6
--- /dev/null
+++ b/docker/prepare.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+
+# install docker
+curl https://get.docker.com | sh \
+    && sudo systemctl --now enable docker
+
+# add the nvidia docker repo
+distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
+    && curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \
+    && curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
+
+
+sudo apt-get update
+sudo apt-get install -y nvidia-docker2
+sudo systemctl restart docker
+
+docker run --rm --gpus all nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04 nvidia-smi
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..403233b
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,5 @@
+# Project Doc Rebuild
+
+## Attention
+
+Compared with the previous version, the documents in this commit that need to be rearranged live in the source/resource folder. Please re-edit them and link them back in.
\ No newline at end of file
diff --git a/docs/source/api_reference/index.rst b/docs/source/api_reference/index.rst
new file mode 100644
index 0000000..ee854aa
--- /dev/null
+++ b/docs/source/api_reference/index.rst
@@ -0,0 +1,7 @@
+API Reference
+==============
+
+.. toctree::
+   :maxdepth: 4
+
+   mqbench
diff --git a/docs/source/api_reference/mqbench.fake_quantize.rst b/docs/source/api_reference/mqbench.fake_quantize.rst
new file mode 100644
index 0000000..c47214c
--- /dev/null
+++ b/docs/source/api_reference/mqbench.fake_quantize.rst
@@ -0,0 +1,69 @@
+mqbench.fake\_quantize package
+==============================
+
+Submodules
+----------
+
+mqbench.fake\_quantize.dorefa
+------------------------------------
+
+.. automodule:: mqbench.fake_quantize.dorefa
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mqbench.fake\_quantize.dsq
+---------------------------------
+
+.. 
automodule:: mqbench.fake_quantize.dsq + :members: + :undoc-members: + :show-inheritance: + +mqbench.fake\_quantize.fixed +----------------------------------- + +.. automodule:: mqbench.fake_quantize.fixed + :members: + :undoc-members: + :show-inheritance: + +mqbench.fake\_quantize.lsq +--------------------------------- + +.. automodule:: mqbench.fake_quantize.lsq + :members: + :undoc-members: + :show-inheritance: + +mqbench.fake\_quantize.nnie +---------------------------------- + +.. automodule:: mqbench.fake_quantize.nnie + :members: + :undoc-members: + :show-inheritance: + +mqbench.fake\_quantize.pact +---------------------------------- + +.. automodule:: mqbench.fake_quantize.pact + :members: + :undoc-members: + :show-inheritance: + +mqbench.fake\_quantize.quantize\_base +-------------------------------------------- + +.. automodule:: mqbench.fake_quantize.quantize_base + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: mqbench.fake_quantize + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.nn.intrinsic.modules.rst b/docs/source/api_reference/mqbench.nn.intrinsic.modules.rst new file mode 100644 index 0000000..31ad8ea --- /dev/null +++ b/docs/source/api_reference/mqbench.nn.intrinsic.modules.rst @@ -0,0 +1,21 @@ +mqbench.nn.intrinsic.modules package +==================================== + +Submodules +---------- + +mqbench.nn.intrinsic.modules.fused +----------------------------------------- + +.. automodule:: mqbench.nn.intrinsic.modules.fused + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: mqbench.nn.intrinsic.modules + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.nn.intrinsic.qat.modules.rst b/docs/source/api_reference/mqbench.nn.intrinsic.qat.modules.rst new file mode 100644 index 0000000..65ded79 --- /dev/null +++ b/docs/source/api_reference/mqbench.nn.intrinsic.qat.modules.rst @@ -0,0 +1,21 @@ +mqbench.nn.intrinsic.qat.modules package +======================================== + +Submodules +---------- + +mqbench.nn.intrinsic.qat.modules.linear\_fused +----------------------------------------------------- + +.. automodule:: mqbench.nn.intrinsic.qat.modules.linear_fused + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: mqbench.nn.intrinsic.qat.modules + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.nn.intrinsic.qat.rst b/docs/source/api_reference/mqbench.nn.intrinsic.qat.rst new file mode 100644 index 0000000..5e13f06 --- /dev/null +++ b/docs/source/api_reference/mqbench.nn.intrinsic.qat.rst @@ -0,0 +1,18 @@ +mqbench.nn.intrinsic.qat package +================================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + mqbench.nn.intrinsic.qat.modules + +Module contents +--------------- + +.. automodule:: mqbench.nn.intrinsic.qat + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.nn.intrinsic.rst b/docs/source/api_reference/mqbench.nn.intrinsic.rst new file mode 100644 index 0000000..b68bedf --- /dev/null +++ b/docs/source/api_reference/mqbench.nn.intrinsic.rst @@ -0,0 +1,19 @@ +mqbench.nn.intrinsic package +============================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + mqbench.nn.intrinsic.modules + mqbench.nn.intrinsic.qat + +Module contents +--------------- + +.. 
automodule:: mqbench.nn.intrinsic + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.nn.rst b/docs/source/api_reference/mqbench.nn.rst new file mode 100644 index 0000000..3e580c9 --- /dev/null +++ b/docs/source/api_reference/mqbench.nn.rst @@ -0,0 +1,18 @@ +mqbench.nn package +================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + mqbench.nn.intrinsic + +Module contents +--------------- + +.. automodule:: mqbench.nn + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.rst b/docs/source/api_reference/mqbench.rst new file mode 100644 index 0000000..da91af1 --- /dev/null +++ b/docs/source/api_reference/mqbench.rst @@ -0,0 +1,95 @@ +mqbench package +=============== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + mqbench.fake_quantize + mqbench.nn + mqbench.utils + +Submodules +---------- + +mqbench.adaround +----------------------- + +.. automodule:: mqbench.adaround + :members: + :undoc-members: + :show-inheritance: + +mqbench.convert\_deploy +------------------------------ + +.. automodule:: mqbench.convert_deploy + :members: + :undoc-members: + :show-inheritance: + +mqbench.convert\_onnx +---------------------------- + +.. automodule:: mqbench.convert_onnx + :members: + :undoc-members: + :show-inheritance: + +mqbench.custom\_quantizer +-------------------------------- + +.. automodule:: mqbench.custom_quantizer + :members: + :undoc-members: + :show-inheritance: + +mqbench.custom\_symbolic\_opset +-------------------------------------- + +.. automodule:: mqbench.custom_symbolic_opset + :members: + :undoc-members: + :show-inheritance: + +mqbench.fuser\_method\_mappings +-------------------------------------- + +.. automodule:: mqbench.fuser_method_mappings + :members: + :undoc-members: + :show-inheritance: + +mqbench.fusion\_method +----------------------------- + +.. automodule:: mqbench.fusion_method + :members: + :undoc-members: + :show-inheritance: + +mqbench.observer +----------------------- + +.. automodule:: mqbench.observer + :members: + :undoc-members: + :show-inheritance: + +mqbench.prepare\_by\_platform +------------------------------------ + +.. automodule:: mqbench.prepare_by_platform + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: mqbench + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api_reference/mqbench.utils.rst b/docs/source/api_reference/mqbench.utils.rst new file mode 100644 index 0000000..c7e393f --- /dev/null +++ b/docs/source/api_reference/mqbench.utils.rst @@ -0,0 +1,45 @@ +mqbench.utils package +===================== + +Submodules +---------- + +mqbench.utils.logger +--------------------------- + +.. automodule:: mqbench.utils.logger + :members: + :undoc-members: + :show-inheritance: + +mqbench.utils.registry +----------------------------- + +.. automodule:: mqbench.utils.registry + :members: + :undoc-members: + :show-inheritance: + +mqbench.utils.state +-------------------------- + +.. automodule:: mqbench.utils.state + :members: + :undoc-members: + :show-inheritance: + +mqbench.utils.utils +-------------------------- + +.. automodule:: mqbench.utils.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: mqbench.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/benchmark/ImageClassification/Benchmark.rst b/docs/source/benchmark/ImageClassification/Benchmark.rst new file mode 100644 index 0000000..509cc8a --- /dev/null +++ b/docs/source/benchmark/ImageClassification/Benchmark.rst @@ -0,0 +1,43 @@ +Image Classification Benchmark +============================== + +Generally, we follow the `PyTorch official example `_ to build the example of Model Quantization Benchmark for ImageNet classification task. +- Download the ImageNet dataset from `the official website `_ + + - Move validation images to labeled subfolders, using `the following shell script `_ + +**Post-training Quantization**: + +- Backend: Academic + ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| W_calibration | A_calibration | Backend | wbit | abit | resnet18 | resnet50 | mobilenetv2_1.0 | regnetx600m | regnetx800m | ++===============+===============+==========+======+======+==========+==========+=================+=============+=============+ +| None | None | Academic | 32 | 32 | 71.06 | 77.94 | 72.68 | 73.60 | 74.83 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MinMax | EMAMinMax | Academic | 8 | 8 | 70.93 | 77.67 | 72.51 | 73.48 | 74.85 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MinMax | EMAQuantile | Academic | 8 | 8 | 70.89 | 77.70 | 72.52 | 73.51 | 74.82 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MSE | EMAMSE | Academic | 8 | 8 | 70.88 | 77.89 | 72.55 | 73.61 | 74.83 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MinMax | EMAMinMax | Academic | 4 | 8 | 52.25 | 73.50 | 24.76 | 60.37 | 57.25 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MinMax | EMAQuantile | Academic | 4 | 8 | 52.20 | 73.45 | 24.75 | 60.26 | 57.38 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MSE | EMAMSE | Academic | 4 | 8 | 54.96 | 72.60 | 27.03 | 62.30 | 57.93 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| AdaRound | EMAMSE | Academic | 4 | 8 | 70.35 | 76.87 | 71.82 | 72.32 | 73.58 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ + +- Backend: TensorRT + ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| W_calibration | A_calibration | Backend | wbit | abit | resnet18 | resnet50 | mobilenetv2_1.0 | regnetx600m | regnetx800m | ++===============+===============+==========+======+======+==========+==========+=================+=============+=============+ +| None | None | TensorRT | 32 | 32 | 70.63 | 77.94 | 72.68 | 73.60 | 74.83 | ++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+ +| MinMax | EMAMinMax | TensorRT | 8 | 8 | 70.33 | 76.72 | 72.50 | 73.28 | 74.75 | 
++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+
+| MSE           | EMAMSE        | TensorRT | 8    | 8    | 70.55    | 77.79    | 72.56           | 73.41       | 74.70       |
++---------------+---------------+----------+------+------+----------+----------+-----------------+-------------+-------------+
diff --git a/docs/source/benchmark/NaturalLanguageProcessing/Benchmark.rst b/docs/source/benchmark/NaturalLanguageProcessing/Benchmark.rst
new file mode 100644
index 0000000..7311451
--- /dev/null
+++ b/docs/source/benchmark/NaturalLanguageProcessing/Benchmark.rst
@@ -0,0 +1,4 @@
+Natural Language Processing Benchmark
+=====================================
+
+To be finished.
\ No newline at end of file
diff --git a/docs/source/benchmark/ObjectDetection/Benchmark.rst b/docs/source/benchmark/ObjectDetection/Benchmark.rst
new file mode 100644
index 0000000..c0afdbb
--- /dev/null
+++ b/docs/source/benchmark/ObjectDetection/Benchmark.rst
@@ -0,0 +1,4 @@
+Object Detection Benchmark
+==========================
+
+To be finished.
diff --git a/docs/source/benchmark/index.rst b/docs/source/benchmark/index.rst
new file mode 100644
index 0000000..c1d1415
--- /dev/null
+++ b/docs/source/benchmark/index.rst
@@ -0,0 +1,10 @@
+Benchmark
+=========
+
+.. toctree::
+   :maxdepth: 1
+   :titlesonly:
+
+   Image Classification Benchmark <ImageClassification/Benchmark>
+   Object Detection Benchmark <ObjectDetection/Benchmark>
+   Natural Language Processing Benchmark <NaturalLanguageProcessing/Benchmark>
diff --git a/docs/source/developer_guide/algorithm/add_ptq.rst b/docs/source/developer_guide/algorithm/add_ptq.rst
new file mode 100644
index 0000000..c12aef3
--- /dev/null
+++ b/docs/source/developer_guide/algorithm/add_ptq.rst
@@ -0,0 +1,67 @@
+Develop PTQ with MQBench
+========================
+
+Assume we have a PTQ algorithm that needs certain layers' inputs and outputs for calibration, and that some calibration data is provided. Thanks to the prepared model, we do not have to worry about switching between quant/calib/float modes. Implementing a PTQ algorithm in MQBench takes only a few pieces:
+
+1. A fake quantizer.
+2. Data hooks to get intra-network feature maps.
+3. A loss function used in calibration.
+
+
+As stated in :ref:`add_a_backend_to_mqbench`, a self-defined quantizer may be required for the PTQ algorithm.
+
+Usually PTQ adjusts the weights through some quantization affine and its backward pass during calibration, which needs intra-network feature maps. We provide the hook ``mqbench.utils.hooks.DataSaverHook`` to catch the input/output of a certain module. Register it with ``torch.nn.Module.register_forward_hook`` like this (gradients can be caught the same way, as sketched after the code block):
+
+.. code-block:: python
+    :linenos:
+
+    def save_inp_oup_data(model: GraphModule, module: Module, cali_data: list, store_inp=True, store_oup=True):
+        # cache either the inputs or the outputs of `module`, not both
+        assert (not store_inp or not store_oup)
+        data_saver = DataSaverHook(store_input=store_inp, store_output=store_oup, stop_forward=True)
+        handle = module.register_forward_hook(data_saver)
+        device = next(model.parameters()).device
+        cached = []
+        with torch.no_grad():
+            for batch in cali_data:
+                try:
+                    _ = model(batch.to(device))
+                except StopForwardException:
+                    pass
+                if store_inp:
+                    cached.append([inp.detach() for inp in data_saver.input_store])
+                if store_oup:
+                    cached.append(data_saver.output_store.detach())
+        handle.remove()
+        return cached
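+Similarly, to catch gradients instead of activations, a standard PyTorch
+backward hook does the job. This is only a minimal sketch: ``GradSaverHook``
+is a hypothetical helper (not an MQBench API), and ``model``, ``module`` and
+``batch`` are assumed from the surrounding context:
+
+.. code-block:: python
+    :linenos:
+
+    class GradSaverHook:
+        def __init__(self):
+            self.grad_out = None
+
+        def __call__(self, module, grad_input, grad_output):
+            # grad_output is a tuple with one entry per module output
+            self.grad_out = grad_output[0].detach()
+
+    grad_saver = GradSaverHook()
+    handle = module.register_full_backward_hook(grad_saver)
+    model(batch).sum().backward()
+    handle.remove()
+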
+Then you can design the PTQ function like:
+
+
+.. code-block:: python
+    :linenos:
+
+
+    def PTQ(model, cali_data, *args, **kwargs):
+        quant_model = deepcopy_graphmodule(model)
+        # disable the original model's update
+        model.eval()
+        # let the original model run in float
+        disable_all(model)
+        # turn the PTQ copy into quant mode
+        enable_quantization(quant_model)
+        nodes = list(model.graph.nodes)
+        modules = dict(model.named_modules())
+        quant_modules = dict(quant_model.named_modules())
+        for node in nodes:
+            if node.op == "call_module" and isinstance(modules[node.target], _PTQ_SUPPORT_TYPE):
+                module = modules[node.target]
+                quant_module = quant_modules[node.target]
+                cached_oups = save_inp_oup_data(model, module, cali_data,
+                                                store_inp=False, store_oup=True)
+                cached_inps = save_inp_oup_data(quant_model, quant_module, cali_data,
+                                                store_inp=True, store_oup=False)
+                # this will update the quant_module's params
+                do_your_calibration(quant_module, cached_inps, cached_oups)
+        return quant_model
+
+
diff --git a/docs/source/developer_guide/algorithm/add_qat.rst b/docs/source/developer_guide/algorithm/add_qat.rst
new file mode 100644
index 0000000..c31ea99
--- /dev/null
+++ b/docs/source/developer_guide/algorithm/add_qat.rst
@@ -0,0 +1,34 @@
+Develop QAT with MQBench
+========================
+
+Given a model prepared by ``mqbench.prepare_by_platform``, the fake quantize nodes are accessible for further training. We then divide the parameters into two groups, normal parameters and quantization parameters:
+
+.. code-block:: python
+    :linenos:
+
+    # SomeFakeQuantize stands for the fake quantize class in question
+    quantization_params = []
+    quant_param_ids = set()
+    for m in model.modules():
+        if isinstance(m, SomeFakeQuantize):
+            for p in m.parameters():
+                quantization_params.append(p)
+                quant_param_ids.add(id(p))
+    # everything else is a normal parameter (avoids double-counting parent modules)
+    normal_params = [p for p in model.parameters() if id(p) not in quant_param_ids]
+
+
+Then hand them to the optimizer with separate learning rates, e.g.:
+
+.. code-block:: python
+    :linenos:
+
+    # normal_lr, quant_lr and default_lr are the respective learning rates
+    opt = optim.SGD(
+        [
+            {'params': quantization_params, 'lr': quant_lr},
+            {'params': normal_params, 'lr': normal_lr},
+        ], lr=default_lr
+    )
+
+As shown above, a QAT model carries fake quantization modules whose quantization parameters are trained alongside the network during quantization-aware training. To learn how to add a fake quantize, see :ref:`add_a_backend_to_mqbench`.
diff --git a/docs/source/developer_guide/backend/add_backend.rst b/docs/source/developer_guide/backend/add_backend.rst
new file mode 100644
index 0000000..8c5fb22
--- /dev/null
+++ b/docs/source/developer_guide/backend/add_backend.rst
@@ -0,0 +1,86 @@
+A Brief about How MQBench Supports Hardware and Software
+========================================================
+
+MQBench can quantize models for many different backends with many quantization algorithms, which relies on keeping hardware (backends) and software (algorithms) independent. To support a new backend, both the hardware and the software side must be added.
+
+We provide a typical work-flow:
+
+1. Add ``FakeQuantizeAffine`` to simulate the hardware behavior.
+2. Add a corresponding ``Observer`` to provide ``FakeQuantizeAffine`` with the needed information.
+3. Add a ``ModelQuantizer`` to insert quantize nodes into the model.
+4. Add an ONNX-to-backend translator to deploy the output graph.
+
+
+
+FakeQuantize, Observer and Quantization Scheme
+----------------------------------------------
+
+In calib mode, the ``Observer`` collects the needed statistics while ``FakeQuantize`` passes its input through unquantized; in quant mode, ``FakeQuantize`` performs the quantized forward with the qparams calculated by the ``Observer``, as in the sketch below.
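+The following is a minimal sketch of that two-phase flow, assuming the state
+helpers from ``mqbench.utils.state`` and a ``model``/``calib_loader`` defined
+elsewhere:
+
+.. code-block:: python
+    :linenos:
+
+    import torch
+    from mqbench.utils.state import enable_calibration, enable_quantization
+
+    # calib mode: observers record statistics, inputs pass through unquantized
+    enable_calibration(model)
+    with torch.no_grad():
+        for batch in calib_loader:
+            model(batch)
+
+    # quant mode: fake quantize runs with the qparams the observers produced
+    enable_quantization(model)
+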
+
+To add a ``FakeQuantizeAffine``, refer to ``mqbench/fake_quantize/``.
+
+
+1. Function: Add a fake quantization function that describes the quantized forward. If the function is not differentiable, also describe the backward.
+2. Autograd and Symbolic: Wrap the quantization forward/backward functions with ``torch.autograd.Function``. To enable ONNX export, a ``symbolic`` function is also needed.
+3. Inherit QuantizeBase: Define the class that holds the ``qparams`` and performs the quantized forward. This class inherits ``mqbench.fake_quantize.quantize_base.QuantizeBase``.
+4. Add an Observer if needed: Inherit ``ObserverBase``, collect statistics in ``forward`` and derive qparams in ``calculate_qparams``.
+5. Use it in prepare_by_platform:
+
+   i. Import your class in ``mqbench.prepare_by_platform``.
+   ii. Add your backend type to the enumeration ``mqbench.prepare_by_platform.BackendType``.
+   iii. Define its default **scheme** in ``mqbench.prepare_by_platform.ParamsTable``.
+   iv. Add the mapping to ``mqbench.prepare_by_platform.FakeQuantizeDict``.
+
+For now, we have provided several linear quantization affines/observers, which should save most of this work.
+
+
+Custom Quantizer
+----------------
+
+To prepare the model by platform, quantization nodes should be inserted by and **only by** the ``ModelQuantizer``, which hides all the graph-level details. After preparation, the model can be used for QAT or PTQ.
+
+The ModelQuantizer will:
+
+1. Fuse ops, e.g. Conv2d+BN2d+ReLU -> ConvBnReLU2d.
+2. Swap in QAT modules: fuse the modules into QAT modules and insert quantize nodes for the weights.
+3. Insert quantize nodes after activations, for every op whose output the backend quantizes.
+
+Op Fusion
+^^^^^^^^^
+
+Fusion goes through the torch API, and you need to edit torch's default fuser method mappings and patterns because the torch fuser does not accept extra arguments.
+
+If you add new fusion patterns:
+
+1. Add the fusion patterns and the related fusion methods to ``mqbench.fuser_method_mappings``. This turns certain op patterns into intrinsic fused modules. If you create new intrinsic modules, add them to ``mqbench.nn.intrinsic``. If you introduce new fusion methods, add them to ``mqbench.utils.fusion``.
+2. Add the mapping from intrinsic modules to QAT modules. Define the modules in ``mqbench.nn.intrinsic.qat`` if needed.
+3. Add everything mentioned above to ``mqbench.fuser_method_mappings.fuse_custom_config_dict``.
+4. **NOTE**: If a pattern only applies to a certain backend, update the default dicts in its ModelQuantizer rather than ``fuse_custom_config_dict``!
+
+QAT Swap
+^^^^^^^^
+
+Swap the intrinsic modules for their QAT counterparts. Add a universal mapping to ``mqbench.fuser_method_mappings``; a backend-specific one goes into the ModelQuantizer's ``additional_qat_module_mappings``.
+
+We ship bias-quantizing intrinsic and QAT modules, so refer to ``mqbench.nn`` to see how to add one yourself.
+
+Insert Quantize Nodes
+^^^^^^^^^^^^^^^^^^^^^^
+
+The TensorRT ModelQuantizer is usually a good example: it quantizes every quantizable input tensor of the relevant modules. If you need to quantize other nodes, add them to the set with your own logic, as in the sketch below.
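+For instance, the sketch below extends the base quantizer so that the inputs
+of ``torch.cat`` are quantized as well; ``ExampleBackendQuantizer`` is a
+hypothetical class name, and the property follows the snippet shown later in
+this guide:
+
+.. code-block:: python
+    :linenos:
+
+    import torch
+
+    from mqbench.custom_quantizer import ModelQuantizer
+
+    class ExampleBackendQuantizer(ModelQuantizer):
+        @property
+        def function_type_to_quant_input(self) -> list:
+            # quantize torch.cat inputs on top of the inherited defaults
+            return super().function_type_to_quant_input + [torch.cat]
+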
+Deploy Functions
+----------------
+
+The deploy stage turns our torch-based model into ONNX and then into backend-specific graphs. Typically, we merge bn into conv/deconv/linear, convert the quantized model into ONNX, and finally remove all fake quantize ops and deploy the result. The functions are defined in ``mqbench.convert_deploy`` and registered by ``@register_deploy_function(BackendType)``.
+
+The deploy stage might be the hardest part of the flow, since it takes care of all the hardware details. The normal flow looks like this:
+
+1. No extra work is needed for merging bn and converting to ONNX.
+2. For removing fake quantize nodes and collecting qparams, check ``mqbench.deploy``. There are examples for linear/logarithmic quantization and a self-defined ONNX runtime.
+3. Some platforms support standard ONNX quantization. If yours does, export the FakeQuantizeAffine into onnxruntime QDQ operators. [1]_ [2]_
+4. If extra tools are needed to complete the translation, integrate them into ``mqbench.deploy``; there is also an example for Vitis-AI.
+
+.. [1] https://onnxruntime.ai/docs/performance/quantization.html
+.. [2] https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/registry.py
diff --git a/docs/source/developer_guide/backend/add_backends_in_3_steps.rst b/docs/source/developer_guide/backend/add_backends_in_3_steps.rst
new file mode 100644
index 0000000..1f5d2e3
--- /dev/null
+++ b/docs/source/developer_guide/backend/add_backends_in_3_steps.rst
@@ -0,0 +1,202 @@
+Add a Backend to MQBench: Quick Start
+=======================================
+
+MQBench can quantize models for many different backends with many quantization algorithms, which relies on keeping hardware (backends) and software (algorithms) independent. To support a new backend, both the hardware and the software side must be added.
+
+We provide a typical work-flow:
+
+1. Add ``FakeQuantizeAffine`` to simulate the hardware behavior, plus a corresponding ``Observer`` to provide ``FakeQuantizeAffine`` with the needed information.
+2. Add a ``ModelQuantizer`` to insert quantize nodes into the model.
+3. Add an ONNX-to-backend translator to deploy the output graph.
+
+In the following sections, we show how to add a backend given a specific hardware platform.
+
+.. _add_a_backend_to_mqbench:
+
+FakeQuantize, Observer and Quantization Scheme
+----------------------------------------------
+
+
+
+Quantization Function
+^^^^^^^^^^^^^^^^^^^^^
+
+
+Now we have a fake quantize affine ``ExampleQuant``, where :math:`x, s, z, n, p` are the tensor, scale, zero point, lower bound and upper bound:
+
+.. math::
+    Q(x, s, z, n, p) &= f_1(x, s, z, n, p) \\
+    \nabla_x Q &= f_2(x, s, z, n, p) \\
+    \nabla_s Q &= f_3(x, s, z, n, p) \\
+    \nabla_z Q &= f_4(x, s, z, n, p) \\
+    \nabla_n Q &= f_5(x, s, z, n, p) \\
+    \nabla_p Q &= f_6(x, s, z, n, p) \\
+
+Then we implement the corresponding forward/backward in ``mqbench/fake_quantize/``, like:
+
+.. code-block:: python
+    :linenos:
+
+    class ExampleQuant(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x, s, z, n, p):
+            q = f_1(x, s, z, n, p)
+            ctx.save_for_backward(x, q, s, z, n, p)
+            return q
+
+        @staticmethod
+        def backward(ctx, grad_outputs):
+            x, q, s, z, n, p = ctx.saved_tensors
+            # apply the chain rule with the incoming grad_outputs
+            grad_x, grad_s, grad_z, grad_n, grad_p = f_2_to_6(grad_outputs, x, q, s, z, n, p)
+            return grad_x, grad_s, grad_z, grad_n, grad_p
+
+        @staticmethod
+        def symbolic(g, x, scale, zero_point, quant_min, quant_max):
+            return g.op("::ExampleQuant", x, scale, zero_point, quant_min_i=quant_min, quant_max_i=quant_max)
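+As a concrete instance of this pattern, here is a minimal uniform affine fake
+quantize with a straight-through estimator (STE) backward. The class name is
+illustrative, and scale/zero point are treated as non-learnable for brevity:
+
+.. code-block:: python
+    :linenos:
+
+    import torch
+
+    class STEQuant(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x, s, z, n, p):
+            x_int = torch.round(x / s) + z
+            q = torch.clamp(x_int, n, p)
+            # remember which elements were inside the clipping range
+            ctx.save_for_backward((x_int >= n) & (x_int <= p))
+            return (q - z) * s
+
+        @staticmethod
+        def backward(ctx, grad_output):
+            (pass_through,) = ctx.saved_tensors
+            # STE: gradients flow only where the input was not clipped
+            return grad_output * pass_through.to(grad_output.dtype), None, None, None, None
+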
+Then, wrap it with ``QuantizeBase``:
+
+.. code-block:: python
+    :linenos:
+
+    class ExampleFakeQuantize(QuantizeBase):
+        def __init__(self, observer, scale=1., zero_point=0., use_grad_scaling=True, **observer_kwargs):
+            super(ExampleFakeQuantize, self).__init__(observer, **observer_kwargs)
+            self.scale = Parameter(torch.tensor([scale]))
+            self.zero_point = Parameter(torch.tensor([zero_point]))
+
+        def forward(self, X):
+            # Learnable fake quantizes have to cast zero_point to float to keep it learnable.
+            if self.observer_enabled[0] == 1:
+                self.activation_post_process(X.detach())
+                _scale, _zero_point = self.activation_post_process.calculate_qparams()
+                self.scale.data.copy_(_scale)
+                self.zero_point.data.copy_(_zero_point.float())
+            if self.fake_quant_enabled[0] == 1:
+                X = ExampleQuant.apply(X, self.scale, self.zero_point, self.quant_min, self.quant_max)
+            return X
+
+
+
+Observer
+^^^^^^^^
+
+The statistics needed for quantization are collected by the ``Observer``, which is enabled during the calibration stage to do its job. Given that a statistic ``g(x)`` is needed to calculate the quantization params, we can implement it like:
+
+
+
+.. code-block:: python
+    :linenos:
+
+
+
+    class ExampleObserver(ObserverBase):
+        def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
+                     reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
+                     factory_kwargs=None):
+            super(ExampleObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                                  ch_axis, pot_scale, factory_kwargs)
+
+        def forward(self, x_orig):
+            r"""Records the statistics of ``x`` needed by ``calculate_qparams``."""
+            x = x_orig.to(self.min_val.dtype)
+            self.collected = g(x)
+            return x
+
+        def calculate_qparams(self):
+            s, z, n, p = self.collected
+            return s, z, n, p
+
+Register your FakeQuantize/Observer for your backend
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+With the FakeQuantize and Observer ready, register them for your platform in ``mqbench.prepare_by_platform``. Import them just like the FakeQuantizes and Observers already there. Define your backend in the ``BackendType`` enumeration. Then register the quantization scheme, which names the FakeQuantize and Observer you just defined along with details like **Per-Channel/Tensor**, **Sym/Asym Quantization** and so on.
+
+Add Quantization Nodes into Models
+----------------------------------
+
+We now have the quantization affine and observer; the next step is to insert them as nodes into the ``GraphModule``. We provide APIs for quantizing common ops like conv/fc/deconv. If the backend matches what ``mqbench.custom_quantizer`` already provides, simply re-use its logic.
+
+If the backend has extra constraints, a common approach is to extend the ``TensorRT`` quantizer, which quantizes the inputs of certain ops.
+
+To quantize a new module (``ExampleMod``) or function (``example_func``):
+
+.. code-block:: python
+    :linenos:
+
+    @property
+    def module_type_to_quant_input(self) -> tuple:
+        return (
+            ...,
+            # Example Module
+            ExampleMod,
+        ) + self.additional_module_type
+
+    @property
+    def function_type_to_quant_input(self) -> list:
+        return [
+            ...,
+            example_func,
+        ] + self.additional_function_type
+
+
+Deploy Models for the Backend
+-----------------------------
+
+The deploy stage fuses bn into the preceding modules and removes the FakeQuantize nodes from the ``GraphModule``. Just register ``mqbench.convert_deploy.convert_merge_bn`` for your backend, as in the sketch below.
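+A minimal sketch of that registration, assuming ``register_deploy_function``
+lives alongside the deploy passes in ``mqbench.convert_deploy`` and that a
+hypothetical ``BackendType.Example`` member was added earlier:
+
+.. code-block:: python
+    :linenos:
+
+    from mqbench.convert_deploy import convert_merge_bn, register_deploy_function
+    from mqbench.prepare_by_platform import BackendType
+
+    # re-use the stock BN-folding pass for the new backend
+    register_deploy_function(BackendType.Example)(convert_merge_bn)
+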
+
+
+We introduce ONNX as an intermediate representation to hold the network information, while the quantization-related information is dumped into a single json file. First we dump the model with its quantization nodes, which requires registering ``mqbench.convert_deploy.convert_onnx``. The ONNX graph with quantization nodes is shown below.
+
+
+
+.. figure:: fakeq.png
+    :width: 600px
+    :align: center
+
+    ONNX Graph with FakeQuantize
+
+
+Second, remove the fake quantize nodes and collect the needed information. For linear and logarithmic quantization we provide ``mqbench.deploy.remove_fakequantize_and_collect_params`` variants, which usually need only minor (or no) changes for another platform. Let's assume your platform needs (scale, zero_point, quant_min, quant_max, round_mode) to run a model: in the clip-range collection of ``mqbench.deploy.deploy_linear``, add your logic to compute all of these from the ONNX graph (the one with quantization nodes). The ONNX graph without FakeQuantize is shown below.
+
+
+
+.. figure:: onnx.png
+    :width: 600px
+    :align: center
+
+    ONNX Graph after Removing FakeQuantizeAffine
+
+
+With all these steps done, integrate the platform-specific translation. If your platform's runtime does not accept ONNX input, translate the model into its preferred form.
+
+
+.. code-block:: python
+    :linenos:
+
+    class LinearQuantizer_process(object):
+        ...
+        def remove_fakequantize_and_collect_params(self, onnx_path, model_name, backend):
+            ...
+            if backend == 'example':
+                # model is the ONNX model with FakeQuantize removed;
+                # context is the dict holding the collected quantization info
+                example_model = do_translate(model, context)
+                logger.info("Finish example model converting process.")
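+For illustration, the collected ``context`` for such a backend might be
+serialized like this (all names and numbers below are placeholders; the exact
+layout depends on your platform):
+
+.. code-block:: python
+    :linenos:
+
+    import json
+
+    # one entry per quantized tensor
+    context = {
+        'conv1.weight': {
+            'scale': 0.0123,
+            'zero_point': 0,
+            'quant_min': -128,
+            'quant_max': 127,
+            'round_mode': 'round_to_nearest',
+        },
+    }
+    with open('example_clip_ranges.json', 'w') as f:
+        json.dump(context, f, indent=2)
+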
+We take Vitis-AI as an example platform here.
+
+.. figure:: backq.png
+    :width: 600px
+    :align: center
+
+    Platform Based Model Representation
diff --git a/docs/source/developer_guide/backend/backq.png b/docs/source/developer_guide/backend/backq.png
new file mode 100644
index 0000000000000000000000000000000000000000..fec9dac3ba4166df481a3d999e49f8520130c4e2
GIT binary patch
literal 41459
[binary image data omitted]
zhiRV#i5ZDpurQ0T4GZtgmCNxF z$rW|f&|XDmKQ`QT_tIeVx$9_VMeJ*N?X1W@PoL9tmpYqpdwk3MQ|5joCW=Ik2&(0; za|#O6*lhPtMLptycO;8M{X%8LhlWPfpN-tM{;;H!b+sk-HY+~4g;3#&X(6{LDVz-{ z{6jLc;!%Ox>#Fwp5#!Wd1(o=vmlr75n9ND(@UmA|nK?Kd%*{0o4gD56g1~P9k^{6- z|F>^dW1Wzo8|do;tXvIADHKRh^+GhY7Jm$9!0@4LJv z#&{Be+n&DA?fKct`?Qdqatxa{M1hTLip0-Z_66TtyWGK(Mq1f;#)PjHltS?lu7@89 zBrulHW&qy@43joeTF7hnT~?Nnyu7@rDJ`ELy(tkuAu7L$zIO`~42@-^Mv%cWU( zH;3}e@yt?(H9OA+S!I0NT*weAzlgdZa zb5-EE*R-wA6peA=hZFbGVwJ+*{sIqI!&`eVWJg6Z;_pyR1u_nV_4MxaRGQY4%x_c3 z=LN|stF|n)Th%ktq$Ze5Uc{_9^ZdT^c7y8ZN0AcWxfnf<*xUlRz~2>WQi1|hMFA;)H3 z>@7?hYbR$szjs;{mCcd(Nl0&MOsqkk6H{q$K>2(?OVVcbiZd-8;gb|+@rc|JfIVo4 z#-O91$bb%8)}*U8{>{^m*A`StR+gg=G>Pb`<5I`OF_;(`{S=d*J>H59Offa%>Dhgg-u9R1=@hUn$rOTbim0$C9mxv{=}TGFtmeR3gQhXdIc=hT3> zF=S z!~#WaYyc!&hDJt$0s??2U5cKZ4hnF&tgQK4hUs|El@tx($R=+JlW{(J>iA~1-c^q# zz|7U>UDTWxJvC>6i1X|Kf4ADpcz5gsSQ{1=7ASW>dI8ykWE(fBUeAJF=BcJ8^o!3!%Qq0XYDPv`DJgY<*8w+l zQ~ot{C?6aieo$itMTPJ27R2L6yu46s-SpQ(rJL*5p$642#_ii9V`F0@Bl611M@L8Z zfeXXHFs+T%ekW#WxdPxOaHL=sz+sZ{STQm&NmRd-&;k9!W@S5+SHeO<05<}K(NI?Q zeP{@;tDogxRT+G7GN7)vHa3D>5wdN7<-l=EkeaZ=*oeM9;3a@-^63u4f&0QH2R>cD z$*mYBI}nbi#wRB3A0MlzsuqsefIb1HaE$)=kt6Ut56%v;wPj1jBPE1s13zmlqktiY zrg%V*0O;uN>+{%|yAP0VC=a<(rT{kwM_EHdnlycSM#kp)x?$rB)DlWlwO{Rl^FXoy zkMrY)j=cN^w6AcQv;m29@B1##i&|WGNFc#6LvO7iqIE$BfR0DanA8hM25@XZzygX0 z4LOd^&h5}I567Uuch603OK)lUHzam#sHFw0Aup89z`_Cu35+EQ3X0#z2O7jb^rYVC zxVTSMRiqH=AmfCx5nxMzR$E(IcD+QBhgiQqmXwmp5euv+Eo}yd5oQ!n3VQ%rhNFPy z7Yw)*WUnPfMc?1@-s9$m*HsIGDki2cBO?i+R=>`xU6fYwC~S>^^iWq%kj!;~ZYT+L zn4Q1}?^_|iOZ&?x*a%RL@DBiPv$MAc;Lg#(VRbOe-pUG1RX$uxRaF&!2+_pN?NM-N z8lX^MVbEz0k09%qzBOID2Iwd3BNuN!`Oq7V3oz`w5t}=A?m$Yldw2*WOL!tu8YB*2 zA~&zAw6q_Tc0E0Ez}6DJk@E3r%FTUdY&-`f^0e zh<>7Rv{2gy@wPNy9R1M_8U7kWk;^Z5S`BoSl2SAbf`I$H+4U#`93JQB?=b`TIV0@t4nqYa?Tl70pO}R(` z09^YVc^4=IK!0dNyb!Iuo#H?)!P3F4@FnT#=>any7#s{UJELCj323RT#y|`T3#vcZi6L0KW$$)!BJxoek-m zkZA6^cc!rSr&NL{FX$wH!2`K@MTu<|j{|01E}kdjT+1_se9h6-!`(Ls&=%2p^#}52~m?f9%Z6 zx;l|~J_lEZ4an2nxzjZ;&}uiq&dFH`7^Wmv=}2*5A&a0OKok8y4?El0!8Ur~?rsE$ z-nv6%PuTr3E+`{`mcpO|&=km7uOCQg>foTHr9VT(eC~NMIXpbo@Z4NU2^(}h^k(;q zi;E#sho#4$13M8DYwzn*@cO|`4uu8av9mval5f@l^G`!a2w4&e6LOmg2sI+_vH<-2 zy|eR~tt~*oW3#gZ73f$yLRS|{_jq|H`unkbpM&>l1R5vJ(>~%IK@APOujREhmV_sO zvfe-~dq6pW?g{u%l?=Xe&M_{o8^*35Qv7#syQqnV0ni32L4VE-w}$sIPMa$TS?j zM?xJ4%<~+?iNZo?AzuG?O8nFxl0P`Eu)?I>$-=@7068d=Y@VUEx%fpw4F7lnXBOxNpPi@El|YjAZy zlK{^F3k%*1!W`_Vus2$!r22cP=;%2>`+xj6{{8!Ru-eg4s3Gs_>sE)ue*0haxAjmq z$T7fUSzB8Jy-sTAOB(&I3>ioXBTvs0c=H#hr>Bst92|grPE>zZNmaFH6w2M`)m5wi z2;KV}18JiELXSREY6DmJz_Cusof;y@I~S4mZ7{ z=;-UCf+e-MA!=}fA-G*=B^>JShw81>V_Tm-0bmZ+9*c*7n?0Pn2Zu;(Z&Sg-cKU+R zY;o^lol*k<0gj~w_&G>DKrH}HB9%k$Rm>|?y`Cp+&7fWYl^XZ}*p|w)_}>Qxste;# zZXs3J?V3;g_z@l!hK7!=S*(SO91PQFIP^b8xR;i^zrP7^cGS(%MEf26A%a{6yem*g zZ~+~#@H4DznFhEt;9<`WhQtUY!4^Pel9-$vQfB_~%Q<}P@bD1+gjWR!KF~hEAN{ws zO`v&8zH?dxKN}t%1}Vq**Z^rdvHG&0ZPFClp!?%)d`R*5WMcOvjc+#p9N12 zToW99VAu;;R1uVwabi~jfJj&+4%ft8h3R^sWQHv@a*4cdiytQgQ-hQxzv2_6kD zIL`|n%N{%sYsffCfB2 z_W>VeW@ct@k41E=gPp_$(lYq!P)_v3P=w=D^hqvn*F2vb86CX?>Lv*7@$tjsf6mOveQN{Lxc0=pH%S#~e=1jY z@4*9H9Gs)$W4=d^J_{kin5VB(Q(o|hunyp>)D#vjc=`Duj0XV!1GWRi7UVwgbBgMN zS4T1_SLV9;A3jtave`H~w)z~EfR!CnO98P2;$Cp-&o+?8I&E%76#=UYR1;d+*0nKd zZfb&DO9)gqpn{~QuYoBzuw%Vn2+pp6(LQKDCdFYJQ<0H1LT&?( zk9qqxT?ouD`dC*7qk`_>ZQglsSmI^K;nXz#DFX`7v>3RuqG#X^ulu$6&LdY zkY!1Ab#=u-eq-i0%C!6UtEtK1H9?*b5fv328Gtkc1^|gEAu;jS+J3-q4Zkc$?s0B0(@cXao zUcpIqF3i-R#DW!pBn?~y3?0MB1sQZ#-NYK~2zWn0T)=Fi4xzOL7Xc=we9+5Kf{ciP z0r?M&>1itj;U6Y}S&wY*NZJ6H3LHBeD|7mpJZPe5V@sVPJ@%f_Pp^$`Y8 
z%7EdONz&fC2UWmN@KJo(emK!DKs^V37Su5dtEUEbbv~Mh%sBxmHBwd2Y?sAC1GkDuVE0FQX|CT!MD6vbd2omO%Pb1#oN69 z^72xiakNUC@KtayfuxrRwQn;z=Z(+_dZ~dRwzsrox_>`LTli}nO}oot7o_=6({yY3 zoBsaR*Y}5_Oj23d5ilQ`%UcJ>$JB$Z3i9%uPLvLB5N{&bp{j*-0utQ6TjWOZGuJ8% zGqE1*X`6J018)t_;YyuV+SWRH&Ox2%5 z32=Qee+?(BEc%$=XK3X=OJxP~!NzU@0V>eYH|n{JjJ&)&Z~;&afY@R;x(SA@L){&E z`EP$k@lNEU5+8K!r%$cGd&BxdoUpbw&oTk)F1*aHsBpSAMeo3HgQtcN1?ma@M~^^Z z;0m}qi2MA5*&3Ugz<{<0x_3c;JGB+g0A~dHU+{r45q>Tx*x1^dB~JT2`|%8f+^=5? zcHjVw_iF0uBLGl=!WMXD@a&K+!!>#>I|tDcl7ldw67bg6;6bM#4d|Tm9VgfXpa!%V zkj=8Ov4Kn(ss+g5KxZG6l9Cb;fnjJmobgZ2&erxeCMKw}WiW1IVVS$5K1>(MtQ3|V zii!ml7?fe)SOMiZ*5-f=c6)0Jfb00QG>p~`q$t?apkC8!aG?tl?^xagL=Suh>NddW1SowLrXDUih_%V<9@|15AX6tCk6#| zf;bhl8Nsub-IUtol6iMA5h-_?G8~Ko+pk*j5_K*Oo9Iij}13n&^JSlxWr}S zu2DCkxq5G9#m{XeFtE;QPq>BY4n#5m+gyPzjwRDZM zB|(gbwG?r1?sy#)M~j;g~j{qOjVD)BwCX;;p+pB=NrCyBP@)J7LX}Jy#X(EDiItOh@N1{fm{Wp z8T=6ZB;@zco*_9FAx`yyj0GPHOTfy(0qDt}zkg*hvIJeNCQ7vZH$OvdWgPc_&HJm3 zvw6z$9Ri}%q@=pqx^%I?tM)7D!ZCh_sSb;WknEomL&67M8d6YZM#dXmG^{&ZPz3z? zMM{jY^^$)YNn# zB!5>H%z}zWXJUlN1;}^>WV6BwR_7l3p3A1Z8{2?)SD;ZmRT~Ya2m%mBMn+I9e=09m zzDZ$jE=j0RLBL#r`Auj_LRWVRa>;bRQKLITO1Mr>MNkIM{T}#0e*knSUF?|HKKiNS z&Ijf72aRF?4Yb&y1P6WAkh#7CrVD&WC|9>uI1%rOV`L55`~*WK6H-&53X!H)H!^Ci zsTl<$($)0>A42)623!3;aSXd*4AbeVS)HvWw6KNZ1B?z+B9SG(4G&kr*t~)QS@`FW zZ@|WZ{XE5Gf_S+jm#YR6H}QOpY7==z4OsoMG6yfOhUR8DIet5m35$J&DvP?B*0+_S zso_!H45G$rYE_&oAj{|B=Dsr3%zl<#QVkmqXnF^NrWvv_$it>2p+JDV8H9}# ziv5D}lj-y3NsTlVqe&cJ!#`)b)RVHiAyJlc(TJVlpwi;iC`B0FHs<-o5TO zpaC+g?uM}EI0M+CuIo*L1(|CIevGW|{%?8U55YBqV)LdyAtE)^6ME<%d(w>s zbU=a@f1wF|L3kFD4Qh35z9)kClC$*elpogsT!UTR_119I<`J9`Y;IortQ*bQ*;!x~ zfgL4ko=8{hqzD%vUG&O#0`Or$A)O$G- zf*SWg@PBIoPAqA6sflWEk3(=gdOz0=QM6J!b#`=gfRude2V_UEk~1?io12>yq@*3Z zI2~%;`t;R34Dw1koE1eyr|tc@_fZht1@#5w@+zho-OrXHIZZ6C?HAj#?O%?BU0&dv zI2ViL?Wle(w0^v!lQeWvWBM=w_*#H}kL|(jL#YOc1uZl$_!($PV8kTuVIz_D>YBwH z>SgJ*^%p4p+p4Zoq_~UFLl^!^?vy*tOkP`vvD|KI2}SG;LiOm8w2TZiZ*VX%$v|7w zPe}x#O|S%k=pb20=F#$0OTGIl5n&RC_1d&(M^!It!=Oyt?a62E2z7a1?=wF$m)quQ ze9DY>z_nBv>*>-A2gY=wBVHEWLLhP`23WS&ADX9Ev|tnbka7MV81Exc_U(pKT1YHsr8cBWE`(jy^-fp<}T0gi>oQSnZ=5HSSm`48~ zmQ5i%=(T{|c2JO)({6BZ-%iS27OCq9NJ$1Ckxlf-} zu_31TN)C4;g&WA5H^24mE&u83Ppp{TQ!L%SS&CD7kw3%*863h!FhHxU47KqxC|+MZ0;5ro`y5yzJ+qAU9fP&~uOUm-N-#3)#Z>{T*M20RzSE z!GBp}yjE`p4yk-mMk;ozFs<-TNVSRBD$wO;#@?p=lB{(o5cW3EcEWfpai48Hv#^{Y zTA_qMa)2?Bxq8`l62@H(H}@e)MrcZAlDP8`>F^Lpw*Fe0Yu?g{`tj8yO10$KokAs7 zo*=9{h~sE>_%^j)R&+EJ{oJ~Ch$z`*&-?uhKdbGAeYA>TN4y+bOm=@RYR<5qxxcV* z7AETyU}o=#zLe~$=QSx?bm;MRXBn-F(48M^hPDl3t(J2GgEOD8aQN6_=1c>vD~@;E zq?tNv3Yp8N#K?6L6@BicrDGiDe>iR# zD{*k@>ilc=>F{@^imDD-G-y@O>-n^vnOCQu#dGj|B1Rs^AB)KFkYslvbLuXcR;M(^ zEbB(%5xoGqnZ$JIT%QA?^GOq5~ z26|aWewYQXMVu2cbB-^{jGY!kjcW@(5${9<%m%5Eh+P_Qi&#TR~Rcio?!Jd|T}NZXBjO;Y6~fb_<1EqT0bLNpppG3&!h%om(IO)TpQ+ zJx8Lni%==uqE&wMrt^q?4^_x)`z$m$U;$rTn`~LRFFJDr%)=qqX zmCJ_U|6%G?TG;S@b6tY0f(%YaU=RD01X|X;}5CKim$#zRjS23-RAuNpW`xT#B1DsX^tirltKntxcRTEv}WWwA!7Kv;rq? 
z|Ckpu-(F2J-q~z0Dp>mTnBZCEoFnm%BUR~Jh?>|P4cXEPSBM_Va5vCppf3nmRG|O^o}RHl@V!p1)mY zO!UO3`rxUC#58qG&KLE?x?fulw)j?ST}RS2G;4x?N@*l}>zKd$K{W2RD;hu=U+L$P zZvM{8M=QXU^IBJsn$!{P#-R|z(V(r|+wsslrSSY2L%xdAO}6>Os2C|-mF~tq!kfzl zSc{oIk8Wv^>?u6eW>})>pT*C9dTyb=bjq*UFEY>MvU6+P#bcUDEyhPPq|9hB6Bi-G3xvM%f~+Q zkRL>AVLUf3w_pvui23)z#8aJ`d)~$`aXjVh8IFF=e0#N|?3)+hk+t#hD>a4S$tNDq zF7~vXS#nhgk13uy#~Pv^taUroY%cxl{rj{_l4e)df^W++=xz3u6js8msnxHQp~UtX&Zl^UqNJ>*xf zw5jAX{zF?ySN#PCeYDXQ>G{xW=Ucxdhj@=g@S0mbTKy%)8ps+$ySFb^Dt$MytiO% z_u}tcyYB(o@l+VPDGTH;p)4rezk;%C;rlDm;fzewAu@Co7#7)wgy2fkMxp%paB1FSYJIf4)r9YN8Ier~hN8GFY%Z6jS<4AVO)+A=O?sU`z^re;M zWt+-pIm;V1_;*uZj$z8`Oy-uAMdL56TO=_@G|W{zkmd1EL%gIR({7rXT8;4eNePDG$Tme}Fu8>{j7xD}Q*XtSxn3cXK9-msiwCU+x@~t|sy1BXn92I0w z@{-1;e|6xoBTnYg83&Dalm4Fa7hp=D&HW_}P|OO_A>Rygy3u;N403p$ElC5%AW;5J zY!E7ks!NH!Z{L7}B(h*y#;-`#$<$WWwnuGamQvP+k?ALZgm=V2FThuehsCy^^n^+l zewLou@$~feo?cvZ3!*OcbS>{e)=#iV(kN`_yK5Ga6wdTsLFbsnP5OeQyqU_Jw-0;F%)OxJJg=k#lZEi#h0wE$Gof>U#YLf#2cSsT zX=q}C7tTOaVoq92L^VZ`O(`prb9CgGB=_z5965mQ#aWZm>Lgtx?!`2 z=d&g6tfubpZ6vzfcztsquq4?K1|iUJ|L$M=S9 z!*2cWPLTDSqjokSAjow;+3hlqI9Fd~YJajZsXw0?r{$Nr%;|b%^JDAggI6w(I2q{{hs&CzaDTrWRdp_CWc_Ph zyTRkZ$k&mM@>1q~A7b;msOS~G17l9z&x9v>ifW_y;Hf%uJ12r-rps?rNs3F*ja$o(Ehpp z|Fi)N=hSzm{J(#*%m> zQ$?bE?pnx`!>l%gZ`TrNlOH=hdEv%f^4i4G%9b)Q__8}$ijDJboUn4@*{;;i+Hemi z*=GNAVE@Ul&12hj!X@-Sh{E zAz0((p|UlB3kIDugxE-)$M0=+|A-*Vu-wMEBP#e-BMx8EndhM~z5Dy(oCp=uspk>| z|4eUG(7eIV!V9*H#|(17otxTBvDS@We!Sf2wl97BTh`!6CCK#bn>KXx+}RFu8Go7 z6wQ8%UWMg-5#PaghyC$;4~DJ2Fji!9rKztR2@buh*yewlB#=htGKe6Z#xT)-TvRaD zJiT_)TU%JNMT#P;z#C<|=~=A}yLTH|^`U;smvU*vIaL`Jfy!>2Iro2F zc~)0zhNzYQS8Hb)4`ti_@rf*1LS$>va3?7@B4p1_g~k$N-<35+mNCW_Eu<{jlRYM5 zC(9rzvJDwA$=J8CWnaefoVxG-|I>@-<#W9_-puD(Kj$3Rah=Ea_dC<{<_5i&?(KUN zO|j?sKjuqQXJ*7)YvdfiHkP^-@qY~W+%CmV-pFrk9U}H1o8eSS(Cm8gr-jiZ?8Tc0 zj5!S(lySSexgr60{Cb9NfNppk4a9{zhy3xSM!p8z^2Wm@_yF=qeq{@ zjou6>Nhng^)#BW$s`qe>EF|wXv_dNAr87*%)Soj6QPNab#}*)B0oe?;_I#^@cLuzq z7HdXpCN}dmV|HraHNOqNWTZfnj#IwCN?(t)nKc@9&DyX+aDQP4enq}gNOww&pDU|x zXk`C~iD^Gr&BQNKR<>7QfTB+b`+FI&Abv;Zkm#|V)K_WIdGL2@P{Myhmv?)#C9Xde#lnP3M;L@m z`{Vi%IWnvQ%T8yC%_a?en_J?t(o;|iuMOa19LL0jvl$9mtZdkN*1QphCWvO90vUsR zr3@ChA(Wdghm#W~PK7cYz0|w}w>)NH%R4vh zlC(lDTxG6+$$B@<9*^KdyyMFge^0t)nfGhVS;A=UG7W0thQ5IpS}!muL4^L^4)s!n6LhKHCW(dFERg&k{&$Cnhj-6Dtwhnq`^LWLC4#I{G` z)I!+@Ee?)SHqIZPwfQ+h%vEo5j#uP8g1Pl=7WQd;3x((6Fj5A5f@Rili8v%G>A1kl z0+u-XD#^^QI~2_!W!~itK`tgkc5{xqSDcUA+c;#HZ2nY|8ay+vO6??FQg4Np@tp45 z=?Y9=n+ojSNguhtYl;n+zSmCjqlnTy<4fU1+POtO-)P0ZSU_y}wP+9I8z=C{{}Oqf za_mb7uW|N*u=Fyz4$Hq)x#w>`EKGDjQkEG9g@{J>rknSQb4no-cFdTegPKFvM7l(m z#_JNYKK)l3i;I!>3$$}iqsrbSs0_$gFX>{Hiu9g6HQil4-1(?kHKAFZynLDmk3+XE zg|kk$t>T9}Y5XNLgWjaPO7x$>W%G^fPall+e_s}9IF(%CO0n8h;xQS5dM#x$dMw26 z&9?aR7)p=8QH1>#Cx+MYbGQ9dU0gIB?~SBZkABfzVeDMlJA}klVxA1Hw>AW3J0-1h z^%wJu&&H;annOlQD(nm#IMF3Ki(8zd~;E^Rvk%hcj)MDrWxA(L?G0o z72ws-nHR$+pn0(l)4jskA3kgY3m{fFgl)~xb!2_+?~*HcZ8E=U@dT{U@4gnhD0In5hqiQk%TtCTKl9!1#vW1PET>@zBdwXw^Az+Qu(xFAr$OQ~Q zh!vRT2CV=gMo4aLJ5b;Hg)&%2fr9tsIa!Wvf6yS5?wULv8qH(O@FY4Kx56JubQSc1 z=$Bx?N!V^cqSE^x;HL24nEa(CS7ZNr$V&}=bFPHfGC*>{jR0s!jlL8y2b(sAt|!^} za7m>u0#FhZ#{d%WC6xmshdG4eYMX#B>1aJFpz#rm11*5(t5k927|?|O;#fXhiID^% z@2#!Wu4%btM=@?Hk= zg;nS??0+$LNH~51y_ZNV)oo!wyiqn2>df8U!r!l+6!tMOeYVMj$;v_i4TV*yEsYOt zfq;ptl-XUI%z=rN>Dsqj%tAqdG4KJdgh@`Aw)R(cJ^7m-L%#MHNx7Do&o4gpu}m8` zIEX_lCLr!)Omfr$J`ihO6B|lxgCFyBEip+kgWJ{*mo~Up%D-P&*)OE86AV&*X0`pR z1-ZGDJIb9+@5nT=D*5--z0A5sIh!8Ao#Bt%Dgu-|rj1>~D|Fx%s+@QhNOFiFzl`gH zP+Pg)60~^oUf4&O1~`)@i^UqFPYVT|u7p;M;}0&`8h7A42R|nK z>{KK6Tmva!r}k4(P5~Ete!FR&BV>kFJCl-S;9*JZfvrA+RgJ#I_a!@8W+}baoQ|b*$?ynJ%Q1; 
zv!{|Z>a-l8h@I3VcOO!qDjp(Kyk)`C7wurd;eLj@mv!m1JG{IkI`uD7Sz1aM60?X)3l{9N{aj^V?Vm}c_UBrh{ zn+oxq@|3AX23#bv*Ak$0c%QWV)eAmCfr5H{E-Few33Sb%DR=LQCSe=r4tjws;h3hD z+X*9FWo!a_mhC71%_^FH$z>(>Yi2W6E*pd@OO#{p!aE1e!W2DhhxS{LMlLmfSfGnc zVUa>UFjFXY?KU_wAWtE+igFw|--=8N4n2*w^ep^}GSMF~Y%jXNz@ z(Dt6Q&_^e^Xyy)vL%Un>4s4?DK_-4;NeR`T_%L%VZ7jA${UIc(eL;R+lnakV`}FB` zGjF5PMHJbFP)ikcV3b?OPjfWZ20T4PKMF#R)VK(1QZ9NJ0*l~V3VbYgd0)y3Mnx5q zxYnu$q>1~4d-ks)6qDaNN0wB-;s3r zRRJ~wh06X^N3DXG#-+ldt5F;gH5V@xrpBP0+~2eE_t*y+ z;XVtg!>%7cgO}A98%LTM?^m^43C~|PulD;DS0ubl=iyrENQf^QUZuM=M^yiwz9ze+ zREG(2o|K}7{Orz6NuZGbriVtz>LJxnS6?d5ex>=+1~EULAgRxEh$r0Xx!St^8R?Tq~%4r0=wqq1H!Gx zJc%UXVV^vQL(=Nu3(E$Vjb>sz&F>Rswhx-`89r#mW*jW7my*9J%)o*wuri4WE6U^~ zt-XgG4ULLhyJv=8R3I3k0W2Pc_oI={9fIyq$Xg7An=dNHZBN>6{!L+aemJXF$J%L+ z3VaZG#!7-$h&l9)pF{F#ZlzqnVo4XDdop)$!Ct_!muI=dzoM)xcAR?bhtZL?fle>N zRSjvs*GciR!t9*>OxFwp_&RWca;p82z1Q6nH4B?pdZBYfWo&a_f8+DB(^|~jd-vZI z0I-`Un(KwxS$4Js@~l{3P8QBuCW{zR;x;air_nm613s`-=wsj@EudZ*P=!&^PhiJk z^QzK096)iS4zi4c825Od-dbS=^h_`28&6!^J9)4hus!5d@#9xPphoY`2L^rIIL7*g z%J-rITW=;$cYbVI(4ASn5(s_n4ehV)_u*|3-a4gQN@6ibsjUP^h*)MFDTbMTNrPMo z@hP#Vp2$L)fmryJg${j}DnSti3k);|j*1R+&4WNYF|5M-gh~BZiRKrI+9besh2*r( zF_-~{OQCNfU|pk8nBOmZ+lXx#mSv{pAoob$`@%w<&I27CD??a`hts;wi7=IFyH@Xd z5!&!-ApB^$^UWGhLOiftfT@7FxY1G(u?K9%m;~XUYM6fDXi~r5U3V`s8)V6^%f^B9 ziExjMSYgdpz$uDIODCpK+lVSC;0}fl$voc~iTN9Kp}j@{8<)GEBmoozD6JSyhU0*} zPrA&?O-f~^O#*xs*>7t?oS+!OiY5F0?z)!R1pqSex)d<(#)(9>*i^ztAV!|r5XdnD zZMB<*{xj!54{ULb<#Lnpqo^A4eO|E&!v_G0&*| zVA`;c_SeAVBdA}*E!cB@&mFQ;d&C`;xBtU}_jmF8Que4G{xb#>jtc8PBjle;_$U5( c0ZFq5EHYHO=_fpk7mp63t*)zvRk05J51#NQmjD0& literal 0 HcmV?d00001 diff --git a/docs/source/developer_guide/backend/fakeq.png b/docs/source/developer_guide/backend/fakeq.png new file mode 100644 index 0000000000000000000000000000000000000000..5c992d07918d387756d39fa31f593e7e0b6d38a5 GIT binary patch literal 36306 zcmb@uWl)__v$nZ$2@b(6KyY`r;2zxFJ-BOd3m)9vf(N(Y?!ldnyX!o8&pA``P1Q(E z4Sy*1eq?E{?z^uhLP6ZjL&MO@QG#lhUg-N?xdq+;pn;$r4x5;TPX0+E8G#Y9v+mQS)gg~wKKx+fdP z-6_Vg#^q8dbrOqUtiD^o$JlyiQ}OR=jod{KdW#Ner@%iBXP4+N4c4=3(SD~d3omYv zQ)VklFPoDq+rO7IE2EJypMHH2-+1&pf6ewj6S&VlZvk(cvf`1VZ48utEl{u0=5X4T zqCkX(oJPC6ysWpFjE#!2$*QRdWCvZIHrTDGe}Ov0fDE)mB19>vsoAhLAVH1~$KG6P zb5~PSlR|-q&PO+8)iN^r#M%rmc73=|m64Kyod5;g0WB{-uHWJH$vH}xJ!fuiE;164 z0uFc&d~$L!lR>B0K6aomDH=R9aO1~NIZ7LAYcd{EwD5tGm{5rlg{&Vf;-u+{3JMBl zW}pA_yzk}Zq3-L~z0NmL-cv$Lb3tbBES9iP<2)!N#Mh1kEky88F8Ce26E zcqthdN^*G>T)UN~BIU9UuUoRjVTN0|TFsY}rTUYTll*)#immgQz(A2^`?bods^t3i z*{Or6JSf&?h6q*K*PS?_hK2@S_cL2V!vNJ6@8G_z-rnBdy7n8Lz82%@MGuJKVq`_! 
zi5nf>TqXn2K;y8mu~Sk~$mAc^Rn^t;@$m5Q@h38QoX=KUHr~2tOB6}?J(MUg5JE(6 z9t|KiFFi0K=JLLT<>uzzovzURz5#mJ?6A?Ke>kd*A0fu-u-;Ckm_3AsXz}`V3!cbQ z@o6EKjBWq66B_hkd~(u)H4W&oC6XF{hxh$@oBKIe)8^r92?IU-59W`kp+;ZAP<&sX z8iSr~b#*g~ip(mAa&+ESPD%V|Mm#HnQz=rR8+LLTCXizz&rGNnMa}WVP6=k#bkEXrqfqbCs_;< zlA5BT5ZfvYtzU*Bj*h>AKB)PaUKL2j)z#O>Luo}^c(ez@p>jUo0j>H>MMcg+8+CPc z_44wP!R0_(r-YTZnow9+*swnwMLPv_XTfoEphBYt5fM?znGQWdEFmRj;bg_2(}yNr z>IZJJl)XLU6=P{_3or~^?CiEzcD-~TvfPB!^KrM`*bYAQ7Dn<%UEHcQ=g83{-8E?S zC~K*QR}jTmixHfTMqL}-q~S05z?A`Wm7SffNv~e`!s~iexS2WZu+eEZVQ9z7Mr}L@ z-+p!1g}jpd)9=oL2!?n4Qo#R%!Vq@EpP*6al}3(l0{5ayq}T%5Hp8pwKi&L{ufHr+ zl$Qg$p`Hlo^XJdw(l0#I=a)TYKWCOFEkzG9eyX`4GRzu{#@s;0OC|jpB$gyTza>?p zz}SOp>pmHEAgOGI)vYIad3$|6Ik62{CP0p{Uu*s3{49W;LlSkk9l_Dy?yr0gH!~(i zYyfFc=hoZiH}OS|hQc>;2~^TGdC}D$PaSia)wmU`R`9-k-1S0r5*7J;1nR@>V@B%R za>NZa!hh<5UF4d1akg#VDa*mRPZ9uG+%68%i0@{@Osi8p2JycM&64^Ad$$WveNOey zk}pxb6!;Z8EzvP|FrL9h8_%|6DP3AAqbxfjGT6GqIz5(YAMN|?J1brnIXk6}D(%P1 zL~@jO`$q=J(5=(<>IeDCHBL<5pW}B3Oz-(SYXjtWSz=@~Rmeg~GWPWLB`WtC=GR|> zG<*a%w;2>oM`q%DGnIcWJThf@TG+`!^1LVqR6X;cQ$^6fVP{A7L7`0($Cs5O=Nl8?nQOTU=1f7&SPC9d*i zN2!PY@qer?v-m)P5mDImx@&Dhv{pLZ@!6fC=w5f+`hCMWzC?#$<0}P5+2oZD@5!xf z|MOEk5>g2a$bAse>qt zw51oQ0AmQ5_uOkUW6m$?Ki4yq82i$3$F3L6y3A0$Xm7uXROePNd8~j`%v0<3rkXUm%Vw1>~|KrOy0$} z(3xy^ruJVLpa7XO=$c*<)njul(ZBwvh-$}w#R!wkp5H~u{KR}rDK#`kf_mO!B z*>VTm3IPfz`K8x(!RGawp)^f=Yj};u!K8q#yk>?bDf&0u;XEZL_^f}$N_h?(-(r_p za@Kd~qQV+u5B5D*saM5lS?#s%`CNS>Gn&kGhz}*_1n^di;4v;g^2=ZF&1LH zR3Y;_H$x?{G40v!+q~%m&Nrq2{~yxtJ19~$Et<2I?D#RFy+-N~4j?D{7R6u1(?jgC z-W%RmLLX&vNL=x(Wq>t@Wik+Sgcrc+0OrQ8!u0JC{{bNk0rsGFmL*+zN)ZD0eYwc& z-)B!p7^Olt<=!&mO1Hv!g7z0Y2Ng`79LS*Qr`gOWi+)0npiE&L#ISzy(yJ|n&*F1e zABK9kfI^`xM1Uqke;>|{$YJ=e)#$0OK;;kwn(&`YgdNlM zpsiJcH5=X<^*0zN=^W_Ca2bGu&lebTfdH8sREHa4LKs>brVIaIz%UK1CdjKnFs!8@ z~V;^Smj=c#>6e~G;R~6m1{;ed1L;N|$m#C>{HeC_rzof<#bsQzm zgKSyd;it%9^N~kOpX*rF^CjqZ@j4*Cm7s_O&UYtzC>A)cPh65xtm$4=^;@dfS z3LFt11^8K$ednjl@(N-pE4?yF_>rMN?c>>h8ch14clrIdr*VP+qfPelvY?$XeBOq2 zte2csoOF}O^}y1fg$d~BU;79QVEv0UmC8`UJN>cg{na%wP;3?@28H$@-!feaxIN*VUR+(}IquMLso9ud zle4ZvkC@BggafFZ)fsH37b=X=-Zec%mY19>nBD-t=B0Ia;ejZH8N1S3?;O z)L!!L8ZznM$H$N02&xH&N|0xHbn7@=RMZi#1NLiFZ8nKCo(-uB?*t#AjE;anVXN#E^) zQTPbVDO}$@e|v#f9Sz#~pX_77ziq2jaJrB0i<1ou(=gyE6FpwtJQT`_mCtM8zLw2F zRGiPRtBPzby!|_DK-~)c zs+ISLQ8k|vWO*4(XhlR*tP(Ur2clnSap@p()I#&Q>YQ&_z7HuMnlDx@pUzE=E6kSXQ8x=L5(>vwZ_Hg4ri&MNRRmRmmaFUR`!Q5Fb-mmJ+r^+ z1~Xt?3zVhBQU|{Xq|V8~2oJg}JrbzL*Jr(A91`QKUJ8z7Q;aY(*{0>llH_Wl6t+i( z4?R9ig&$A<>2%w_y0cjH7wrE|+?*mqW+*3=wo?4rycLxD$+szF3FzxjWNkt)A4Te|$eKpyy4tei|K?14&&IN){>ejUzn%8U9)`k#c+#DQ=cI zOhZFcwXjNSDNfe6@_fxS|D^Z9#3!{H$s-fFPLw5^ECm8~ulF6B_mGX}9%qIfgi$SC zeA&{a`3xnjRW?T{j--C^b{sXjQ*E&6`Bvh~(g)53z9GG#33{a&fpiUB5i}33Wkzy!80}rfrwO^Nf?{jBf0Yzu>t@?^f zEz7v6?7M4jY(=AV@fTvu6toyQq0$-u;P~B{5Ur#|U&%5;pdwZX zmjuEsPoyg#8T5U&P*zD@wT5>6QqJ-ws^fl;l`M=nM7mV$N$n7W3 z-+u92^+U^)qxQ8dw?tBDbF5%M_>5Lvc*G&1?tV$zpf`q?h5~43!HGt3E;&skOL2xY^S$J&r{&l#KJI&5` zwkS9@VX}z`>Z*Rb6*8m{hGHiL7j!NI=a(dDJGMfs^ec?quKs$1VXBA^%h;7vgzf!g z?|a8qz+fsS)fl&n7QiAkw~NC9ByJ^^L9# ztiWWMqWRz;!dhyW`!(-yuy3>qQ0zT7M{yYgxtfU{})X*D%YE4PUP`Y8x=-=AM9AvPBsD@lutX)yQegh(&r%go=KmRYd=NMIq|%={xR7Vtzy0r;IsW(WZ?2PAGDGU-xSO}PLe>SLD}gbNmh0;!^KR@x1S`XyOD>5`ps8576t>n> zA%2n)@TR!~B6n^=OhpHcs2_1{3C~x8izhE$q1WM=msB$eKmk!LuN^*_#TsmB3LQtw zFn|2QWBe7?15~C-Py1|}koPJ2=eYLWpF@AJ-m~!TDR+gH4xuF|S5I&His1NFCCua8 zz|;mBq@BX1_@pp`dLdw-55ezOn_NJyRdESkWJBNfDU0V=L(3()`~r0p$9urX9oliH z?TVyr{MYVsbB`bl0|MkAHk}Hd32z@ak<+oYf}X0Tic#%IdY9uWmM0?Y2-RQnQ&r|W zm(u|wjPt|icWSCarO8oZMEqz`BaIH}jQ+jEu7Ynjr~l2nPn)X}>G-Pt{Zr}?Onu8| 
zav#MLh>du5D8vxKxpx>4i1Sa}v5=sXmOoj%6nh5V(rnoAx!fO!9~O_*?3b?*G8Fi2 zA@_wU6~8`JG|$%5@=jac9A>&_(Oi+o!nM|heWO&OPGZa^Nx`ZS^&ywtc3b}E;IA!k z^>mv}@}xK>C+7)<%*^k#w}aelzu))p@VhyXO^3H|7B8>P3{+>S|4x%Q5`O1+I69PN zcB)o8O@Jrh_l@2)l(N093o&G#5v zK7prvN0BDL)7<7De8Eu!D{~F*$%yCA{eanb(w!3LTKX_K+dLp8(;sj1tXeaZFj2XK zYr7xS|1$c)ocA4ROJur{1ShIDR_v%&7nXChK!-w-ek6CyUlMKaTr6Jr!ciR6C>B@4 z1gAE-o{+^RdAoKtN z2~sgemrEIEM47~Jv0llxpI1K1mwbMgV_6e>vf{(KC9$}hpb^387_IB9s+}*QLY0zq zqKszDl<&rKm7jS1oPBFczjsVe9W2#b9N$tl+am4*s@3E3v;PMBWKeF7ye-%udN@I~ zOcU~`ElNK06ib!1gZS?5jx7FQP%z(cGFt!(aqsyKx&AX3bVOkNsg=jamDF3S=mzE5 z+FHPavC{tl%)B2M70sgplg5GEO}>tOK-KQAo@+CugmxJ&K7O@kEhN#`+yO}}A9w0m zC~mrCXMRc+JsxE|Qq=%JEYu+Rn!9PMzL84NHLTFquaMF5t;FKMra)btYh-(sutZWN z6z7WBwjk4Vpto(lIXUG|Te|T20@|Upw6u>P$uM-F@Lb@qsh5|RgqT>n z_x;53vR7XyvZX5^54YfGX=$x>`VxQ?GPwhxgM>+Uw+|0_d>$M@%q%RA@jiwl?9D7J zEMcf560JpISect;tvK<+rs8g&(&UOuO2Yc`c4tP!C=-20I{$r(B#@$ zs2bMY-R)r2)8Y(nvYLgB_^PGb;y;RUk2CAhjqWy5{;xU(qBdN&m6j52wmRtWwOb? z3zK$6YN-^1&Uvb?o_A*_CPQ9Ue?UF299Z(nyS_pEDGFqVy#$sr%?fAbyM9v>c5u1O z+MGsIJPvfAlFplPed8m|?#Q{fTZmTt!9gGtFndF*O{5|G*@$~HV_TPq+GaSseh1NENj8Dk=$yi637>x1&=<5tJEji!yVP%KyA@&Mc11lYIQd#dWWxn<+BP2a_aD zJ${DR)-S#yV>>QekUc+S^XMXu+9m#$kca|brXD5V z)>0ucwQQ|IPXwhul*Q6@v5~gb^hSlGAzAKvL%Qm1(fK?cK9uG1E2C9kUVd5WM zb?3X<71iz#QS`F;d>!Y!9cpVg;R*g`LvB-_Xv~?hHo{px1Mx{F4@j59sMjk(p7L0} zpM5GeAfPEjCZ(ZMxp_i|{c+jw zpF>_2?J6z1qrAijUNJ*$`f#l1U_6w{CLI)uxEsb`;u~3b(;Ng+Cs5E84_z9VUq#`GC}8h zx^QSfMVvFs_TH$^Ui`6vD%_U%Ja1Qvpg+x$;ULMla?2;L*tu-!!d~J!r(rDKm1{LK zc>Honu=+F~op%Pd>sSi5K`FGBYlNQ35QMPf@`&SjV{UX%Z9LKyFTb6yFDga0f2+w< z2(2q~Um8`MGibVc1NY~2B$j=+EqwBHpO6MO;u(sAD$n7)2FSCKwqu{Az%P?6cfV{e z*LE^fF#@<&{ogRvA13Q-1i)Wn{i z$)z}8)&yC9}hik|eRg6wj5KQzjQF1?6{9dy-p4}-wZf~m`Gjg~4 zSoyzc3GDqj(fH-GSP!e!29|d{g4ynMB8vxs#C*SeQ-Ju<)^k`kQQg(5=O9T1QAHJl zlLG<;P#Fpb>^`@yFIRX>LK+8S~cRWeLuepDuyIoPAHJ;`4Lh3 zx;0h{coQAG`b2`mwFl=dlwaHV66LM(`M!~Xx_`NWl<@B}nfU8_BREDpS6pk6!Ucv` zyGmN+pE)*t9U@^tK4(bJx_G&7hgVN-*+SRtilzmW6(6#5RWjR+FT5n)tFpyzc6F+&B)&?Ocq~_L4Z1u z`3+^?@pq>>c!^3G0$-zwiJDlss-|H5F-T(n=3eok=*s_9%Y8t?!{PAwJtH`AHd3lr zm{$SOaN8}qZ!6oppZ+cuf|MiE8FeIrc-?Se;s zX4G6;NUw7VKj=Y~%_~NX78^+EkCe?KAc0CN2ZIZ<03%#rec>9!QM-AhQyFD|8=p1; z%YUydbZ<=NzkBSQJ!|TBTz_eTVG-C@n$j;*M3R2)ggR|Yy%-c>B>Nv0Ap2vWy~F*u zfakG0%JF$iyv25BVQSM1%q@)ktXb8!y+>8a5v03V4_v%~y|Wyfy!`B+VjM#xF+Rw66`(NZ)A#}_*u5j z!SmiozcSFS50q%k;EeyQ@pW%{ZOthT*#WGf7I*`E~qSOQkyJx(~lAtWN1RIvMv_fm^BIs;vT}I z0z0|hs0&NZ*_Hc>*=6}G#`V}VLLF9}N_OPay1=F_;o|*0(LJMv>c+f~hJ700A~u*| z_w~Ndnf#q2h5S>;L4LOA>wMkDiIX|3Gm@0q-TJ#5RDgt9g?|BH894Y2@axw6w`7~s zVrPiSeczMWh1P5)bS5u3n>XNBn z;u!-}kG)L}8g@EoV+}``(oiohDp`aTPP&tjvADCYDGNF`JZSGzOf&hcT;(9PvF_c>`g)Nx{M>Phti;Ez z;CZ#i@wUxEjLEQ!EfDMXC1(g<6b4jv7YYpuUptC`>g4C;4u#y~E)+By3VOH69r5LZb^dqrO?8!iVh+PIQ6DX@oJ3dOU+R%r9*|V~CJ6f-|kiE|z z;5apz*yXiy0@{Me^R-jON^FI}=aB}3U?eS?^();^An3XwuzYTk*MUj;Oo}&QP=^IL z)#MY-xF*9e`d=hV{Sw&lim22$i;Ig1n(Te0pE|L*48GJ?hCK#;3BxM4%GsBz&Y;Ax zN~cxw=oz-Jygxk&T}46$%Soh_ewpa#u$KX9R z4B)(ka<m&>%`C=?&^AvhJ1Dwaag-e`72fZ{#(Y z)y=p)LHlkyu=!;$n~lVMV(j*Mk40-xY+D5 zxZZIFPsHGJSnm$Sm%m7~R&rFI?7=-s;*5;52C~b~_dM0>O}3r*V*K(7XorrRPb+20 z1(t4@LdopmP0gd3kIzeg6po&ro=!=wswf9vYbDXd5Uj^qT7-5F`$Pp*xTQhIK$Khd_&efWu08xPk5hAe`mR;c#4GwxTKRO=g3Ve!55WZB+V$Rl0T0g-nv*jV0-15v4PSjz|05uIH8RS)({s%(C2^ zvl6BCciIA0g0Ms9xVA+SLaOt73A^u>(nwkE4-hl#u5MpF&a1mlPS4L4Nrqtq)q#M1 zBNPwc`v1Vt!KTSc0EnHX;N#;XK-~Qoss{jOPEJmMn956SQHO70Y@Apz-~D|6Aee*r1cy~ z^o9>a!<2+vCG6&94qL}|cKVH$gg>FAhdNPKG||Wlo^wuQ&Paq=nx7MpFRlatpK<^i zOLsRYZRH#uefIpcE@$+X84Tm)Sa;0C4ZOj%wwx-0^*@Mbz+qaYNB3i>Co0> zjHR~xglVBwdGyUA1XY;luU(_=T`xgNa_6UKs+XBihlz-P&b1P@!pElnJ%8_w0p?jx 
zlf8M_;C*_&dAiI=8a~Ezf3v0@%i~RCATX;_v(oSKd+k5jm04uhTfo=Ip6r1|Q5H`x zhTPS4Olwf(eyeOQsAew6o<#pIe{MxdOviO%mj{?%L=`o)XV9Fe_$@ivGT2e%OaWVa&&+H{1mkz)ZePX^dv^2-;uRSZNUnewlZ*dZ!%)l>(kB?9J z`7;MtA&IReq_nh@nvoHZ@gwQk+1ONiCQJc2tFm(cq+X0CQ71AX0fP`_u~p4VMIWo1ysS1kPrZP zE$vS(wPG_Lv%EV6@Ju0}2WE@{47*dnoJqtImMM7`2J0PrQh*9|;P z!}jIH1+JjagQC!D;ii+blM|M|SPy{hy}rC;3;ISb>s`LQ-fv)1DKs=T+TCFuxIE1i z$>K2SM+4*}VDlQyMpJ-iQU6{A_T$*tm^$5E3aUd7v+euat1ajA-&zwOHGufWImJ6Y zI~x@d0RghHv7w?WQNLOMfKNc4)N8WE1G${9XNigOnSac)0u;`&@^XB3%c-9%CY_g7 zNU-EWLjGI5A-uf2O-)Vb>m6S(-qw>+Q-yq=ZU7Y$c!b^fBUUn`0_C&5mPC$_e|BbO zW@>7QJ_Z6JVqIgSTFpgElidnF0YOGah6*)Sk*eX*T$#PSJpg@b$Q)XiTLEAyA*U^F zjD!l4vyH9oYKdYFcBTXl4$k1-YiHa$HpBC2F_;lYm~nrOKfKRmR$ zP?Vn!`52D&PnfRI0qEq7)XGYFb`;KJ0LCmEg7C2xGdHJnLMXizt>OTa+Y+azrvqrQ z=sxz>FR6@rz>LQeri)oym&*`#m#F_tPtQkPx8`@Tw#-EemoKWTyV@H~9a<%`_IQ2b z&|c$Ab~|0RvLw~B#y>nfv~;alsL&yf3)`KdaOBPmQdAV#S1&sJTTA}$%KZHN@^V^= zFv@>RvY}P&c;3!NP`280$dt8KGkT06ANJ<7$@2?pB$Ey8!AezYjs0}#_X!cKZUA(q4rNnWvRo1t85x<4jm^{^orC(`k)Sbn+}xkJ76Jl- z5Cv{{mu`4+QgUncC`yhJ6EQ>z#XuTBmuqU6=3uGXe*bO|{gf+({D*66cUP=n+VAO* ziiwGdIBpPp9WRFlFGlq9UoOA`umOI4oq(VpicIhiBDZi#^o~zXo(IMqV)IV~rWC0G z>c4|%C~Z}hUqArJDWHIW{Z{4d8s+7!_k*nkuLB-LjU{t@B03HW*gX7QIs1V*AHZag zNFX(|ic_Zn*mF@~;qKmEnMPxq+o`pko$tei5l6Z6N|PN8mQ0O)#}qzHo?EzBK5(U( zg$1xHD!w+tA>oo@A&!oY&f8Q1Y`TEg&9AyT(fy_n77h+I8H~Z1nHhlI{;$20oRn0W zwR*PE<^TTX%TewqE)D~vLF1*^EG9CA{5!Yl0%mkeIb`IV(J?Tb0Af8T2r^HpG}fSV z&fYa@a0eL)DLOhD5()|!YygmCnVFsanDeK$v~*~A_;9*F^2pM1J{7R9kitOH0?1FK zFd0;D&U{V(&?up0#Rw4JBqSs`K-=>ul_-1!0SN8o<$6*?L}YwoF0ga09J=0oT(n7D ztKH|3m71EGj0`dYGH?6O;NbJ)<0qX8EDDTaz)p017KKUE*~26tpaa;+DmAi*h={kh zH^343I5;`UL_>oxETWZ^oZR92+_a?PY-)cjN`dk5g@=NN2k*0q@>4s9fPh|42tF=u zZjA8J2b3^2R#s)lJYaMaWQwF7WOh$in#b=LbJz88YBei2V}w0DJprRG-#Kn-uJk7s z-D9tD^Os)4p@sj#*UdTTjqB#k@3!$$^uPI=^>i>7otdCvjHe44Q^u+ytiV+GT+cnM zX-HvxG~7e=xw#-aXk`Nc<+iu44*I~rz+mTPim={$yx+1U!=JwR|B~ho-Os_+*4Jl5 zga6U;TFb1BL0ge9m*6yoUeSic#a2RKVlYRLc*!SfQYT)dj4s5peQ}=hv|)U9@{gOQ zSbSl6V$L|l)PL(Qi-?gt;foMcrcE3+WsMLcOJ3DntzJhD`!9BS9r+@Kiq-6JP*8zJ z^VkRX+qLCs#N6dzp-GeXeS}*vrZiOZfDpWSy}Nk6{Eu*T`1LEXVmWm-B`I+(?8Hk~ zzv)1GGa1|KB5=d;ju$&^hcFpB!Yqlt7ROme)8LlfUmV3Tj<36(M$1|B`~@{i=M~=|=(8X9dx{ zR@*JWjGpAIP0iX|9#zb4{aFiGTWIj#&py;}42Bd69U1OQOTOuQr!z|6onkNAaB(@p zwk)k=US#3QU8BA>(es=r-)M06kT}%Z%aWok|CUU4@}okB$2FP@%=eNcRHld1O2fR! 
z`KfbNlc9BvVKjzVGSS$T*8pDoz*Gqh%1z+wsknfHnOBpBe?Axaq)kJnP0md{diNe8 z_77T(WKo8xM#%CqnC!3Q3Mx!wtru}!Vl!|KUMq>y_}xKv>o;?Zt5d$ zJ90<^3k@ma-z)uTGEBtYaP`>-3s9?6{#NTf;0df^&y6i!?w%zz*G}WjYaC+9NUR3m zSYu{5-*2C?o3-r3w^Ui|vU7an&ODm`Ahc;Hnhvn5mpLbC4@-ZXtx1_K$6PRpNaL2# zpF~N~J{Ak{^CqK!HeZ00QTJF#3C0IAr4+)>m;3pbEgM~W5euIH(&aum9FXDNUL-sx zI;a@77y){TOvL=475yn4ge^;^3?Q&TIR$~Yd9{}OBEiWsFai9R_(&-_*@%!tmtrMR zG?4%5a*{_(Sw`tWUazwpQqM^!da zXJP4{{r`{T1%Z@8v^>O$;i20LP>4>M_WSAUK`RVj{|&(qw%RbXNUVoE?%m?j-v--w zYOcF6+~sv*&i<=RRK5jZl$P&sFF^ z(%03KiB{Zdx@Vz4;fww6Y;poRFNVo9o4fNZ*_wb^_*W=2V#=CaitzsnzsfWG-~1}- zb&3D6Oc8!!s3%%#mFp3t$Lo5-)dzYN$R1OSnhlgm>G?>%IDv4PA{0&En!6_BiNZ5g z+Qt$KRhIL=1`tf$w*f+@UA62`3G)V8j4O-7hvKIvFTsl@RQ`Df7&jIn7;Q=s%b?tT zRHs7M!k-EVv^Ah#n&6pRw{A0$i{WSWS?dj)ax#q!VTKS2OTK-fon|%M8DH`;?TSe3+plsc87R0w6wv;G_w;#fA?R*G@H5!(sy1yz0L!bXeMZX9L&$*8n>ciLn@rbTSDhC*ey!{Xl}!15XNh19uxagc#;^6$N|9xHM%}7~i-(6!a4)8b zfxeZ@lm6g`uFrq>qb)<{aMwI;zVg9<#B91ub=E+^98x$P%nhIbRtiH@XANGi)l4ZV zuthCOTdqy))*-_~+Fcd#SsFur;B{D#nDL_y8APFyG+`apAIe_OaK(097YTV&<+i>E zO$$7bL@$QiEk}L2GNEhv294sNJuZr(EqSg_I?v~OVk-5pwf^7n=YLY@z6iwJ)tZOk zSTEJ%65;yRTw+6li2I1ATt2;B3p4+JWJ8B0KKbRkaweI<{dU#yP}4r<7I#b?K3;e~XC+Rl_6iR>4UdmVY*}FINLlA4UWc%aJkZg#2FUVC}Ydo6FCy+qlQ2MiVpYH z1*;uIZ|n%M4lenbYBuRP9=-7v$rdS>Ms#)m@*+n6zrrs`tByr~7%4qh!Ef}Lk;Tls z;l3k-hJJCXF0;4I%gdr`(I%@OHxt&9q6smdwcVplE={>pBxtO9Tk5eSUQxC82>pvp?2#jW}sa+gYd}6llRJ zH-le?EPIWMMYt-M1NlIPPgtwx3Ryv-=@d0_uGMtFa^y)oo@uhvQCmfhFyaP~z!c%7 zlKSN_Qj5W&Z5Rv4V&xH%thfbdNB5?I9ngg-D>{hOP!jFiM7)5?*d=$jJKpYX*J(re z+FM*6KaKWZPwt9IDk4azACZ4z&cDZ-BYrleV?(-ldLk(x;aEkn{r=_pPOz$}0DbkZ z!o+BCMgvTMn`vF9pvK#UPiA&-MhDGifDGq=BM9{W2s4gLtCefLDa8V;AVAzCM`H;4 ztfgaw0T?N*CJR3RZeu6=a%M3^-r#LJ&98D9R((bwa~RZ;U)dIh0?ZEMS3i>u|0A^X z*C^Wi&iSeDY zolp-xM&ghD#~m9;V2UW1m+P^6QKPbUnIxf$wd@H*tf=oSow1GA+iwI*ZeBOyTS2zK zA@#?G`++HYqR2jWG&xeN?9+#_z2CV}_H{VfB@Qw1Ns;7>cV*dpR9RYsEPuyvq8?Lm z#$4X;dV}+AgdVX%h$7}pT99m0a;l_&2?^fbHvSlaA5R< z84UaGrx~aIU3bJ#?ybL6%Z=e56xh%{Ln;{3ZyXXDN?Ru;B-C~82b;N+KoIn&d+Cn7 zJ(8hBFeYA|777l9o|abGw2duoEIb^3Zf7p(`}gnv%B6N^Y?`FlwA*3Tk4f3+FsYO1o-PrXV8-?)E~`oj`kMHtbnlA^0?|LKQC_@ zP_#&T@lQjikbvHX zlk~S0mct$_A{Y{&IdILrzoyA1w+@4^Jd5DHY00oG5y+SQK?t0_kieQN|0WhFm@H-a zD?xp(o!gI`OXELkImKKiZFCv%KM9=k2ELSphPJe{(9S_CQZ$8xD(tZVX(In#CcB5T zOwH_S7EmZq07o&b2EqkvvFHZL0m5DTU3KcitFbw>0gm^r!9f)f{&U7=%iPt zcQ%w~ck6A(Oy19J&$p7u5$eq{6iiGtzklNnLtJ(YkBlh4QbUM+v9z%Pq+P6Gh`=$s z>z36@MBtbRjnq1FBeOMS1o~ z0LkASc zIY{amNeDDE19|DODnt`g)1-`yiW}>~|Bb1)fT}X+_JTcXvs5x70Vh@BQEJvRJMK@|^RWd1m(B^NT(9wiMrug&7G9Y-|=1y=$LH z30_7M#KXWL20S`E5*iwShT8u5`sQ75nO1$YzZDg4Txa*WzTvE3(`2FoQl-Z`kh zR%C4eb!RLfL2w{MnV_QYyg2<>-qkv;C*Naj_}EHP)fH2OIL(<*@*E9d&o_WUp!UVZ zquZBPS9fd0hW7tpF;xg+XeO^Gc1-W9-_s>mCM*9TcKoCGl)x+=UUm~|utX8b1>&pP z?C3L8wu_|@z#ci4uu@|p_-Dy8m1bw_gQw$Y^TOu5E>iyY^ympHF%fJ@(m0UzLcacL z`|;yP&D7Vu_G-MavZj3H_TusWko(-bis=;9F-6_tAH1}LNMzQIAu z)iz!r@(qlRmetqKmZ#2>rw*s71eYi%D2Sg+lC!Z*FDz(w1g)RB`3v}c{WYU>F*Y`4 zvih_?U2-t{i;9Yhn9pktWRuI?@wT=$p!kkjngFQh<>h65s+Mp>TwL5ogRao+<%qWB zfqGv6EGnwo&& z7_0-vMVM5xh@^ySZs3^1Z=!mCx7r z84wucUftC|Ne&Mm(h&$x*Pu-rSBP*o_!i+EHGuhNe(8;V`5-MVZL`#%Z)oTU7!q(f zKR-WqM9}mi^5R?2A;ZPlS*OW~2EA>4?aWPAR~O*eHrE)jEE*_);9;VcH_O3-_OAf_ zECAWmZ&U?Zcf25}s<$haHCRh%oTJ^_(;t6NsA|WB+RI*3_FioJxii$x3drqVDj}Q7!4|l}*mgr&R^z@}%c5(Lc z7P&NzwZp^6p738(e0*)d?gIF)TO>=%10b_7GBI&EUc!~gwFhM$APVg`zU4IbtE;QG zxL?5a3&MhdLPA802oG zaLdA+L{1{X{pOf3QBnjRi#lm&NSZMTy#I0p4wI(3I>&#$Sj_`VGc)xR6(<0f8}|W! 
z4X{PPm_kHE1lX<33c$sLhlc|iiA;g@YjyQSoBO8K`vCMELg3pHigLTy9?lXBWM*N}s(w2bp7-%P zqmE?3B=D7JIkM`I9WFIugmjW<-7`2HFWrGb>sz7?-rU^m>k~%_>D+PF-+KlQ?Z5$ro&?N0+T2NoWlOu-A$us7QK z?wlgN^Cq{n^bqjUh&YUg3$+KgXB&F$KCMoB6Tk-qX4>=JP9_7E0>8}?hmw+#kqy4$ zIf$-Lk6QQkzJ19L#@8pS&*07gdrP9=^&Zeb92~a;iFD$at0+N2VKFgO3H{=j41FN- zf#=_|<&c}3tMSq%#lg<*0~_08c~HuaA4{#C^-r9&WyxT@fCK{AYEp&wj`u1AydJE) zyxs>hW#X8S3b!)@2#4KY2)anb-=iZLa`b7&xKac~b@je22QbMRwae4f)02~NBf1rh zY{n-h6tN?mcSZ=Oupw{p@tZxa_5nTVd-g>T>S%s*|HQ%b-%t+F}rc+aIsBjEu+=`h}tI zNC8hZU7`+J0D-w?ZH$W)OgqH@==!+|eGnf3L2n);laP|q1m+AvP{5MEa~w&^OXnq% z;S?2ZZSBVwUBN@4Egl+LTt5qrWoy!fs}PjcpzMxfsXC*k9pg^i0P zuj;_}jCOv$a=&uYypwT%%zXkB1fI5Y6;Q}yl7O*mY-$?Vaxs&>hK!c`o&01P8yC^A5mYbZ-{8WXMlpJSD zH9+0~s|=9FutiY2fUlGaDP*V-4Xu=mU2eiN4Mh`MqQ&F%Ic zw&rs%l7c27Pv1yS}_EO;+A(i4`YDMvij7*alLXX|F#9m0n(6 zlrkw_qgDdm+<^iKhyXsdQgX-iHaj1vnwXe`6qn}w{3%i}31nqjO!kfL7bY^KS|GlH zPcM-`2_$;Olzh%EIk~w&U$Ba!JTW#lS8a+4SXm~flc^n%+;Na$NJ&Xop7%g!x091m zBj&S(-u}&*eML}-+z@g-eo@cTrR^oEVL!n?AeL z)U@m)`@>Zgj~pX}OXuZEJcVz48Z{l+`8N-eSIS$@$A6#|NApWY#b+OxnNd|>=-6`b zjo?U28z}iE^b6S?l@>5Ygm)*jL~46MyE zLAH$`c+Nd4S-szOi_9aBce8am?TNcw{y8m@#h6)`e}A7{U@88IzpLiwjRT=o(cb>7 z!F~gzwrE0bQOX3Y6)Xi`?FXu|zsM@~2z<(cB8}SE?&mE*&U#4oKbGsbUZtmX@lrwi z7rGVXYg}8M_qCO%GWM$4V1sys`0zc+i7-FbboM8?y`lW?(6!Fg+G)@<%>sR1tKMdQ zZVqJ1k>%x95Pt?X!aK#HkGSp6YQGy~RLzt`Bt3_B)RTB)dc7^&BH;HC5@l2BQ%8i@ zvJNw*@lz#AR%WNddOl|}D>8}ja-;P-J$IZ^`rLsLdx=r^*MzKQ@}T$y`3&@A14+li z!h<@7V$z>@(Ahxk|+zT$R*gZIrSSLtTB+iLeHENGma%DXk zK^6UQdj^^UfWi|<~sb^M0!W%wp-Rq_D~FpTMt`` zBBgB%4FS$APCfyOYF-{5QZh0&R@REd)QGnGZ+6(ihbVfnii$r+?eY>0O1teMSYMsL zE%;TOF~k{r-hl90{)~Q$0-~&tJowmlS~ElOZRgqhaQ$Cu5UDr!b&9?$Yj~5UGttcm zYCGS4ok_=GSErs1y5#gbOJ99+0*>3x1llR3kMTdo6aG2NL?%0KslY-iC2vPDAP_c| z5It-ZJd-@t?}LlJfB&5AOgo6xUm!AEO;1g&$?PXefeBGQ&s*$u&@nt}Y#T}N{Q46g zq1``iPc^aF{yeZt#Y6}i-FZ>Kgb@4uR$qk9vsm6~p3Ee;x7Hi5g!n=G2th&K;*#|| ztQ+h96zubiKQ)|7=ich#TdA&VavE1q0v|_@`?UAQHq8dc?BUS$Nfb*t`bWxP{Nq+I zxtFV90Yr_`S2xsqxT(!OEjjkPMSOhd&@0cINz5+ZUiFgqeOZ?6Heao5T)IPAj}V>(|D~~fHhCOP z{j6Ez^eqstaJ;aGXQZ!D>mHt}V;8OZQqSJaNb8^+B2c?$JR%iy0!a*2R#745^#Ira zkjsFw2GmR3o*!#nvg1v@$Y89jeV=)%7@8v)>isJ?*Y@N{_M~Qhe<4Ii>zBb-%gW`@ zS4wD8u!5AzIhCA5q-VZ6p#186v_0$4wuvEl@FgwEQ-nQujmav@t(P3F2zherG3QMv zwRbmAY|{>WJB?kT5_ICyo;`=W+OK{@^Ww%#a=f#~D)oFk9d#f}=-B5JD?gGk!%+}h2 z2~{iwnVNp{wM*;yi1K5E!847>M=UsJ1$)uPYY4vNBs+<+iW)RT{KTP3CAhr0(xiVQNp9E* zTSLjnqDimmX2vb!sv$c=;88-7{zrLcznA-k1efHz`|qEMnVw^s=T3Jq#g^XMq&ITp zg3cNpgPr=O*CN_nJ3H98d@1s}m#$rRr`nwF*ek5>(!VY0(x*m9xPDlm#m=K*-zJQO zlgV_kCV|b;yyY=|l$))8_P9yIUo>WT)TAmST!FTKF`Uh(Ih{!{4hKYb%Vi?P!k#ba9W zzGvH?3;UhY&(LeSlC$YmB^{VuNHqtN*WkYsinU@>HnCvNpW0fnCY6h@275ni2xBMG|7c6HkH^fcBL3@Y)+Z&e#cWrp)KiO-l< zhv5+s80hE~i6C8&?(gq|?Eu0<+7JM@UIKA=_`t*61=cNRGWdl79Xk#KLm4m|(CyC~ zgB<}Cl_8yPi=)7s|8wWyb7%@voR|l1l3lgX!V*8DhNht*t;WP$0(_Af)qxhK5}uvZwablc9%Oi-Bp_KiioO@p%5{m_1cvz83i}8m|;Tu zmwv9tCvsz`&pzQXu^qnFVNa887+rZH{R@o!+5HC7$IsyPMHdFl}$%ci|OT!DoMIPlwbGBimH^EchW zL`AN6$bP*bK=`!XRu@?WTIKW^b=r(L&Y5pLAB7{;72tD=&==jMaq+ngO#ZdgCW~HE zZDb-9^!MYtcXIHGluP2yZts~np_sh3T64E`9*2S8rN{S3l*G2Uk?yS|iS}*sjX5~Z zz*beKnv;eg3e=&fo1Zpmv711a1- zo`-4kR3CDEK5TqnaZ>bGisNUg`nS`&Ef*3ARE|Y$7}LkOn>BtN{e6tMcUR7azw2%jbyYLir&?71IKLbnHDNYT?p=Ph^gZN6ur~kr@4bNd=YVSA}%nFvjvQ=cR#t z)s5KDSW_bZ&waJ+UpRfqE;cpOcH|rE=lK0UTj^{Q;Jh2wzY2VIfQ2M$?>y`9&Ng)0 z^q8X})el5<|6$R#Sq~FYO_PDDbVgvoRo76ZI9rh=SVo=yh5h~8P%Caqoe#2_WWExj z2s$J-OtN@YM~BCzyp8vj$kAm53Pk~h_Lb{^M*Ep?I%B-a@oQA#X)o%~<`JogQv!C9 z+g~lAY@VwtnS)ZYlTJp8@*>>)#{AD&k2zjtCw4b;tg0+ z?T0G^vO?;(SK|AR5r=Op)bY+OAYC+!hTM0#gmm|c}~54sE1h<@*Wd78D$`}4@o${*Br z_lqAcjv$^mF?RhG4Em1uQ_qqc<*^PfR{D&{RpdF*>96V6Z?Gcu#+|&?QHAfa 
zd5vS0dT{9aeM9YlKX_wat*8BdL*}rfeR<}%w?9WVCR@!Zc(%94dZd`h%`+-=i>8pBc9FQ*kSj5)k4vVz?iYWx2ONv7c))Th7%Y#=I7_bJzSWRrKvFiCiLYqJEE4> zA^_+_c$>k$fKvid8@aql+1vZ^&mSRJ7m-Xp@Bh^1qyU+tl1&4fR0MYyfKg}ixK|a^ zj`*^o0*VZ5QfT()k3gTy?V;qkfnm^8MmqIV)UXL%1zk&PYvoIE&)6G7{JfoNi)p(A z;1mUA<;{bG2$>~qfHi{cIdF4eo=m;8NZ0=Z1M9P4bnwx7ffd{Zz!|CM+$G54K^#9> ztd~<#!qB?gc*WF6kQx6(?#8nMAXRG20Z{gHHU9uc7_eCb_z(cNN>t1K^jLocO@Lmy zd8?_pIyeBp{9tMaxDwL8eS?7jG~x2}^f&k$fKvfnpO%)^#l;1%SLy$>`-~j^i*mOC zFbMz=R@zAQ)?c+5L51uiDjF61%G0Bo7Z8{BvAE{~VNNFt?PVbCxz z$Vf=Mz`p?7)3Rl9Jzk=rqr+!0kuC|=0-JH*Fabcn)r=fDH}^dN7@+;LOvW|n?DwZ= z@bU2h4m`6MR9;>VZapyY)rzeJSK-voyXv>E0h*11Qr7Ip3Y-FTbe4eDp>{KsKnOu0 zT!7;Sw=bmTgdGWJov0`+PbT0~xm%B5qLfVw4REDCU9m0@q)h~x{ah4rYk)e2N1ScJ z_Q3;CK~-5f3+0_n95#@IM@2@;!{>d%L;wUMzO#VEWUZwdz-gwSIvFO;4?nT9QKMJ0#}B+(De|NRF0o-Z!{^X+A0XY61)U;}V{1@NMP&X}ttbp}in ze3-BhA-F;_WBEmJ0B%T9N(!st9n%D}ZktyaUBbsD($}EM2-bQjF$6*W1wV8D`ALNPw175zUgKJtTz^y-=&Ji=3c`69B^)X=vPmF~Q8NFeV1oBETIfp&uZt zPR`D3A)OMkvL?*Q)DiKZFSt7WOeh)|*t`Q?$Bl=`;$2lIId&3F;c`i0Z1U549o| zsKdaF0B}?JD3l;(zSn?(NlHl>*ty_VvvvjK7ia(v9{fvVq@2!U07e6#@#yG?`_`V% z^O~NFEH^7lfHcmDQ4R1U;4w5}5LN@K25i@hii&D$o#FYX00@;=P=JGlBq=X$aN z_`CFyT)^J5f-iRVllf{YlX`?dab#iu7Xy6d#rZjC+KzMa2i; zoUuy3&H?Zaom%;!S@{o6N6JLW?iq8Y!=odxW5i;DqZxOs2f?GGSo$bbogUD14c_-I zAt}>5;PwE-UMK4rmyqznD5BJclY(^w201SeMyr4@_3z(~z;IVxNlY7nfp!~w)NCXK z%#PRfp++xf1~^q=f8RnoZS^=X17$1x^3o}?_DD)ZBuLbrZJTS6CpzO*6$`Lp!wjR z;NWZ|7>k|FO(z01Oe`$GSZ>>zHh_LvMV4r(zp z-1}g#l~`(!9qVgtpGiVB!6MWf$G=w4ZD1cld#|mmJe6bb?%8b6RX0_tq%;*~Z62;c zMYM$wMEVqT$EDd5A;-Us6Y=#KX1?*^M~b^i{CL4bU2(C6frBs zyw0suqqcUSuv_FPhcn58I)A@k@jFEr+{B49V34-Cr2w$SdbN@LpUjWm2nQq(;7Vpr*k|)p+gXAdYMx#P zvRc`y?lV#?h=o|_9kpKn&|}0#tV(!ZVCzqNnAgrWWHGnm-xup0AiqC#I^$Lg?KY@X z9}}PwcR=F6=Uct~yyKaP9j;V2->OBI$8w(*+dL@oD<8MLNx{u_+An32gv*4S;y#Jzc-rt=}lz=jmej%+oB_{Q0o* z;LrEUP1g?FLvquF-gL-ri=Cu^WZKD_^Ief|PhkNU}I9n0Hr9QYc;)};t z;@R>ZOpVGXP>PDKrZ{vdqDOMOP!XQzRl{C5t*tfU!L56+{b%!R&$W4z)s3#z0w|DJ z*Mq}h=`i$$0P6nx&dG0}^Bpzjksy9THf;4UoSv}Gf^~Asghr;&M^|KbPUiLV^2*H9 z;4oC}vwxrsal8~?N|Ku0&UHSxDoMw@C0EH@6BEIEzj4Wf%%-We8Y@uy{*3D4XImf) z&m(SI=gPudS5C<0-IbBPOjkv2`*u%zp?v`Bn%B^zfjoj*m$#{SnZ3wo95sxr=#{}R zr=7?BfhqFI-dl$<*Dj(fW@XE}j~~ceAfhu>`Zr!+$S=NPPwr zP^BP#67a+q5kW$g!hI2o%ylMqn!}e(U4v*w>F0{GUa7j=*3lamcV0f_@{~Koj#Oy+ zT?0z}$&Et#KIbPs$T4veNHIG08ZT3xdXvK1zNB9-`0Mi4#hrv!Fa%PFsyAB9cZA^< z{KHZ+E`TXWAtU9@)ed9b;(ngRwlM>OF}X+yv82cj-IR9^}i7OsJGe z=;(sfye&kxAoJ2Oiuwxahba8M8v8)GdnyE??&X^5YZ1;>nQ?%pe5uA;hlCR(HqLt1VdCzgCeRQW3X+EHEz^E{ z;@R?bYeUu>Klo?On|9-N^i!r^74HW+#_0Y*Wo9~$FD~%$@RCi{#u=7VMJP7@vAbmY z7<1>PFQNLUBPC+_;vP0WbY3bt?|WbN`uvscPT6*p_J$$+J_gzCnI!`&C^bsH7{Pcb0^y6$2Y-d^gd-(Xc(T{ClNr8+q^qznD>Sq{Qpx_CPWzEOa zWiPbk5iq7a3}z3b1YyglzA*81%|8w%#H>i}PtNyVIW_xV?y%cT6(_lDIPjF6d?^Vw z@uNd$M`1Sp;AGAh))|n5Oj!h3o{Put8v7Zm7gDwJ9VVK2;wNe-+Z~0Q=&J;A7d^c%s4|G}@}#nNgIiej z_h-DkxWBv&?08r4?-S(rrpP3WDJn(RacE+dmrlPe%M%X#GCeL*f_WSh=ziq}9oVtU z$G-oF%JM^6Ayf(nAqqQ<|0B8C2MO5y!HML!c;w|=qL=Un(qScRTH16($A<~R%J196 zT^=`I8Ha-iTQw=>EIaf#E8N&q-Ff*z8r1(n-HI0^Jjh8w_?E8;ZvSGlBs=j z@)$KFi`2bh{wI5&e&gR?JdpS>CMjO7_3h!uXN<^m!z+)}#juefBbNe}9xGp33W1f6 z-x&h+nfaJE+O-w*zK1if=Y#&NqUnPmE8o*SXO?y7 zXW>o4dt5~N6&D-k5ep2s&M#xG&*Fa6O&p^Q-02_Jd%w9#8SN9Yym8v^&k?Q-{?_V; z0!8aU<|UJ)?ZwSzRBy+Bzc}s`o@OP!(T1#_4;3di`GD z5jpwDp7Ge_ z>ni*3wEoy~Af}zo=Mj&X%l?(ha{#KZX;X|+*`QkW&8N$Iw$0UOSyj~Ceuz-+k-?3Q zbVbMW(zZ5}g7c#pT_GqF%XxM%?CXW$_M;xMCexB@4(JRnveq9)Ze!nO`lPD$vh7%{ z5V7bi7nReqs`MoUIQU)d^S$QR3$n)jb632&e?WSIs5n}miQT0);&{D$Xz#P%uMClo zK*eP}H|=OQA$$}cGu#yKw5Fb<}q||S>##_1_Cj{>Yv8KPI#8_#V3#!$7{fnp)6^Msqb`XK{Op!T4>0{i&^}hXLsOWIL=qBb`IV;8O)Gov)KuR@f5yJHdr=zB~ 
z7OXAEjJn7jQg1)UXM>?7IAhoM>uPV4Q*Yu2?Rb$A)TU7tZ8B^3k5)p>^>8>N1tK*g zD5!HL4=&H;Qnr)st-V{M%7E$<(ViZ?1s1oo9mO%&_mh3k+wRlI&rj_LQh4|bP>_BX zntLswz9jzq*HGdzmO%)yeFrfptMdUy+bm%cv)o;tun>nTxIj+B(|vB&l)dm2LmL6? zG-*{?Rw(j-crNpK%aQABKAf^YqydLc{*DQj{Dv3ml#oJPya`Cw`2d$o&sc1_R5}tw zVA{sTXl^qnw1OpZI<|+d*435$!BzS*bS&up9R95BLG~AGVyMi_LLldfTSS_$mEEeZ z)h|nm4G}}}xrqkv{3uwT74jZ;tO!{~!NDeDXLK>WGzZ1*N0;ZPLhn+4=Nc!&0?TaP~2T?umh6 z_tKD+&edC>Yq8df*fuB}zp|f~$6Hh(OrWZw08p#1b4bGk`3K7(T&(CdYKrrR$lJ3Y?$Fogl4k$aol6VKcvQ?WSTPlvR@kXZFOX$1i=$BdG@T?OSjf}P zxH;XoYf8#@5-F7^ApdksynZ;%^5~ur{nbmt_g%7#LJ?H~h@1V2qhzd~ zGkqE8k!}BK`1qf!w%sv>K=;MpEqL?Qg75)~GeMIUE3|C;S+@1?plvqmJTB_ri{YQ_ z9C>0!cW`>VY$v{{e!0KPL*^QwOME>tY78nR<%f&*Sng{ybD_1s+zYQZ7n;a0kJIBt zCeqyaVP4Zqi)1I{z8s$NcgcUb4RPq37^!g(+-ITTpNr8ee^m-PNm;z+T3|YF{sPEKP=2b)9?rL4DC- z&)J7Db5w4Dx#~^X=|79#@hso@S1^(yI0U^dC^s$ctjDcJY3v@=ADT^b^`4#ma*wuI zDn$OYwyEl2ZwWSLHb1t+9(GP5+E}GpnNF?o)a=hJvP~RuYpil+fdAuoicL$vj$DLvVr_$0-8~BnH;uTN#%*#o0uX(2R-yTA*&`6Q_06 zR9#zM-Mg!+0eFS<9>Mb%%cYm)AakSUxogIpN|%a2{nMR52zTiKB3~rp99J2PN&O2N z2#~TDG?0sbfGv6G%ZG~oMKo~gC3dTS0ao1L)^4~9(RqnegCKv9kf2%(6Jc2{&T(xOIb|-!!HsyHtkBl6fg&3KdhLoG>0Xc7h9}RIG-Txs&k?JJ{bTu{Y zC;5cGEn-oan|mpXhS5k!m8bZA3mpG@e;Z$ZtD~uCh6SotqO0@+Nw{VMJt^&KK5964}|S zt2NDxcoI$znX;*Ya>kynWU*Pnq7`RfePp$dr}5Jh@`8Bwd(_8|Pj~48Aw3B{o*hKg zM%ipCQ|%W%k5@cjlg2bJH?Ac~i*suI`7xNpvg%ymEF(xrz(c`6&VyszWmhs3YsOmE zquVLL;`%it=BZ?L=)Vlwm}vMXgxtwK2A7~+ee}zt&tzGV2qp+{W#e?|W_xDezMm71 zE2mu7bWP~usB{6bPr!VDz(rr|K@)`rl(XCK?Oe56#Ar(j;Mjc-@ zK1^k1beRulOZxw4;YbXkcQ$A{3`Ox@Ylq-|zSn~H8jcM0YM%MV^CT=JPYi+d^STT* zC!&Qy8(`XrWYjskbt(-+HLl;;Dr{c0Ju2|R263@`XNvhT*5~WV_}Yo5x9$0wDQRq~ zf(ZhFF`a`xiDFElCJy+O^!(1K_hkMmA~Asw+LbelV>cZ2gI1qz)B-ir_X}Ud(i;-* z{gJod;5rWfvpWBa)dS?-7|Ify2kQtsBdzLCVZt1+cC7^D2;vDk{%i?}xTVRFUmC0l z`m0&mFiLmv_E`~UD?E{r1uIRe(1R!V1JzL|S;Mg%yO?FsSUa)QSv2%%Hmz6cunMnQa?cJPOO15x=eY1YKIq^-GgW)5t0sY?mJxo2{bG_pVc@Fxf|6#zJrnPVO@D zC+x{xM-^?!lceE)M)Ogo2q{&C)yM39kK_tM-?n@_|rc67L_vjI=0UK-JXC(J_GI0ml!D=CZMn8xC)d zih|B!pif~hClTOWz;V9V$Ws+2Riw?N;5Y~hEd%fuVt@h2$RIMQPsai>Q9wb0zzduU zbSx~@(T+SwaGxm?!1;w%DK4OA%#YMm*R&1L!D;KFl1tt5&&Jum%e~L!vFJk|f z)@B79SOFl01}sCh&*Q&n_gEl? 
z0s8I2;$ooBVP<7juJfrn0u9v~eV_RtfNOUHM`vtuqJJ9`1*SjX*BJo4r?l6ufkG9K zh`<4}t8D?yILWE0z%W!FNDQ1U+aL)7ex_@E{Yf%t2^bR6TVp;cAC3a6Dl7z2d911!7R4_8+psQ_s}`6E zw6U3)^3qaa!q|oX^xGrNMgim=gD_(fz*(lCie!M(a;IrNvIYu}w2DQT&cE#-cWhugq$0_fwyS0?r1gVl@C&uOF+*YnmYDp>n|fY+R7IOnWh ztf^rI7bPs$o&FSk4Qdm>$yFZCOkwI(M0okVsOZZX!Ate@^}VjvLaOhhGOh=1@4{D# zsZ~|XUOY%epTU93CiQ@!{0{6&m?yBYeFe=zBCx*J*nfbVMo5y> z2jV6ae}OD{YVhv(!1b1&9}EVHBu>1+4+Su~4x0|{h9^EWpc1iaq{hb6B~TdVW$m=y z?c99=gWcku?bSQod)6h_vDM=os%ij)PH68e?$IiwohwvX|(`$1318)pb zvE{IxhcEByOk%S92dk(TTnk6hp6+kc>t@*pTui+ltXiN*B&60X5ZkR8Ob${}wd{3`WZCN5}}El#kl&u(jK0 z;%kjC^02_v2S?*rw#MB5>t2aKb{Z#X_q?sr^=u8(X%BDNXO}Y}t1DW}LY~WqO?_`I z)XA2RtQ2F z>1bO{El~!Q=j-!-!GyUGcjdoADT_66Xumf01W}K|-exjoT^&P11oFp2(c@r#)HQWP zQ1-Ymt>KnNrKnucVcxnH1&J#g2p2#>Ac>;onBiFzsbdw${QOZ2iN%KDO-B@ATDX*Y z_`Jd_PtyOn+xNw$sn}M~nsY|h0%U9n{m$Qgb~Tte_>_*-i^)45sxxSb>tzN#f>C{y zlRe>vY_L#^84jeZkAvFHbEz~fAT1o2c0-K+R&;F6+5W}R96Dv;j){}9G2#p4Td`JT)tS>ri*V}-)8BR_mE1|t^~jddXSwS$KFF8V2XL3*7)Mn# zHQxtMV(VWXbw$rXO?Cd*Rsl7zz^Nibb7a1W?+(s@^NA#!5dya}R{HIYl%)%He0=hhFQ$X0{1-&JRfi(bF{uDqmh-T+jA_`Rb8O!xWg(i>h*2De3 zQSN^>d~g5vCv%-pVFO?tevznXYHET+^%B?M0{xl(c7jv@0(wevau|qkKpg{GTE6@z zUuKNdPptfVUq!|xq8wx+R#e6$d~ED| zU@I;tph6D=gknwB(t0J+abVUJfGmCuW z7bD=`VP6CvWZi6Ty-lm+Kbzo=aAwp&A z0PlQHi^u6e4ta-)f@z}va*7PhDQBbk%hJ0{elwmH6&!CfdE8)bOHhb z(D5=R>8&Dnhw+DZ!0k&%hp42LcRi-9rw7c>#M8U-e+M?NH-S47m@9zcQwFL2zum2Y z`0IH$30NO4udZHB?YcRR;&LSg+A0hT482wl6=3Q7c7pouLsm`AC9tN-+*N_LxWND2 z+R6(Ip+($iOu)thgbx2Ph-fIatv0*b0?kvt6DC9;cl?L+HqdgVoQrzER4=j8z%#EaKXZ0DT~Ruu3Gv02W* zz`%g=qqX+N83;=df~u_MnZLDe$T?TcAA)nq8j-JSO@{G+=q>wmNX{W)8tAtS?BG5( z%dTauxxjF5yHGR@xrxaq!yY8%HnN!Cz~2>3 z%)esdsaDC4ygcKBzL1a*H0`S7Un6fVFvcvP zqw=SSTF3{WJORdT(4-Qxy%We@;xwvFYPX15f#HdWH+;a=2)sBS@Z^13Ob2=;PtCaRp_kLjF;M7;rjiD2^=K_>O3jYMuT{t@aYpNYIo^G25 zVUJ}$q$k&O^~2bHF(n!iKD(4A0PbsYcWbBGqWpIQgC6Jn;yfp^j!&`4Ye_G(W{34J zU>op+@yhVru1UQ^M=&<;`IvoWWTbr5?pJI>{UnyiYyj1~2Cf#0(a}~o6h!y2wWGIt z;%k46_X&ZpLafnKrB8eUTY~w#?ZQ8N_bO2?|JkJvOr}y8ECofY?H3XQDcREbWx&K* zwmh}WRPhVM`xczIYd>ckC3kZ*Q)TNVH+n?B+x42~(VXs`6Mk`yPkVQ$g0m}c$=TU& zyDyiVxhb{+e4jp@oYMO}?6MtfETMK6kf+&?UTQeP9mLVq(cP{Iu71Dm%FnZ_*U~;; zbkiYKqq(!ZI_H)ZJ86}kTVVO)d-RzG@-10rad6lxu4;1sdOyq3R$e@a8CIQ4S&CXv~_b#<+u# zJ2x|=kN5ui6LrzsS8vIGQ5exiNya81BNZ7z-nY4>NfYtRE4=#sd{(uD4lDZK%+@SZ=eQ7DmQGHCVYjN;0C`MoIX5!hh|6pXfkZ@$vznPd4mMf6{sg3-vA%-C zHC8rKQ@^u$#QMXVo)+wb3l29kVG0yYNZjvQW_ctU;%KnJ#Kpi1I;fShVa#l!==If; z*CQ1Gw+U(0jgabxfduY}VN6V*kCaupR~d|No4TFU5du_&@ayaw%WYp&J-wAjW0`!? 
zR_tqJca!aSbJiaws5TgXl(x%GW~ZHpPs%MJ=S)K>;OIFBEM$nvy7j{w2#hDMRYLG3 z{Gk@sJeEV%@Zqwl>8nD%2lp}PHF3v)k-_8QyE`ukoWRN;ba*nbf$62CR3E0@btDYl zTV#cXe_AAQZ}FXRUBZPQ8THb7k|?vG67ZQ(Vq$rw5Kx2O+}TQ?M_x(lqr`7y)cWwZ zi%H^7?Y54B^pH)W!_M)39>0w2bauPjlQ1cKpYNGao{fNyYh`OB>b;9` zn^qrYPxFZ+GgJAEN&6dJHva%c+8R0wYR}ty#rA5rk~Umum|Cb@4JIE{U-fcDhl??` zq5?}jzu9riI^+A3#r?tvJX)fM)2KxxACB6tr_PASK-sLAW|obZP<)E>s;-TY3xWb< zc@pvS>WeOj{8fPCT6TUJE^=bmp{j&C7Eb2bLqB@KJ4o#3lWrW@J7nZ9&HtEOt8mo( z{>A=69jb+K^X{-UkdJRlEODKpVe({#g50hlQikwO5&o^)oI0U*B;cWGpnH&-+rSaV z#^aFIB);NTBZI^$^xDkZyROiO5MeDmK0LeTFvUTZN+i5=(Fc!BA$|{M&l}II(A~;E z-o$h*qKeqB##Gz`A$hQ)thR1P;q05YA#lejRph^m)X-|5H<#g;`eD{tMq&`_mOM!4 zb5oH2zi}pPmhIF?wLasMaQ2B=8n7V&T;m*H^U*c>T?z0!18K7y;Br=nzt(CxV%flR z5`f#jvp3rU*NJb6SOeTpx#)-NWMEqcxNmOLeJ}^O-FaK>!>hoP8A@2&qK_T~UK;?cjCH?j2z9Oq z$+{ih=Q69WxA!dYNQhfS{WfyvuU!+H`u@a?+qXYg?AdlF+0UXAf;X~~z#%E%*bMM={2;BV+YY`9TOaop z*q*5LVLyKLYANuX@BM#1c}u_X)(}}2xmit1iz_wXwqRw*Z{SfrJ8FIw0Y?ddi%vtY zp5Jd56C7*|JXTz9`s^7qbfUNMu)Y@op3nUrxToA{;g|RK`!iB(-7vk*ab^|v)9XWCYc*#MB%l!Yf!oag+fwLOGiH^&cE;#`kST%O` zrlwQRKi}@Q12}F3TzC5R`g-|$`G*R+fwL$IbLC{tfoSORYGC6LlLsuEK})Zpa#(qg mePF;D4V=5eV0*+E9{gtw+$0;NGwC)15O})!xvXL4;ySoQ>ceez0*WeJ`J-EBOLvVL@clY3LzAJ0pANT%vXDtS1 zPR~qDcXiLMUAqWVl$St&!+`^VKnPNjqRJo;*dg#k3=0k%A;(Kh1b#s{2}`NM!osfY zC~O0}$j)M#&MJ1M&Tc;)O+YHAe^?ra96zcaC`*ianozYG23>&}9{UDiD_R#J9T;_bV8=+UKM$31ICh)lD{+aS zpm;3C;p;m}PM)<8M8=ds>Q!gP75A3gmN(~9JZu1{APNfyXEKd_Y-tHo#NW$DKI{AQ z1m{*_Vj>k4)ep8`ILcVykV41}*1!gm9`}E5gvP{?A%&*vQ2y8Ee>ZrIAVG&?h8@Nn zXB!u6ur6@;&c`P}g~X6@c6AjKC&Ukdhe*lA)iRJF+Q!L(6|mT6%yMyl4h{h^ps-^7 zn8t3kySvN8GHmvDWF$1xYhy(NpqB)qGOKU02a`yNk{Ye+IZXYM2@D9D(l zRUw<7nVH$v*4CN>w=ckpVA=XH017@B0rwj&Zh3Wec2-taa&mBTRB}{QJ*mibT;)JBjqvG1;=0<3^&m`uSmWPLj zlbO6v{b6WWQCZ?so$r5uPew*YYU)b#)#=HJg}M3L>s9x~zpC2*@u{j}pfFGGnD5-)5<$t_)dV9&r$}&ND=4ieC`)ALE2Z~KdR;L_m za)E~w3Kb>s@OW-ekPVZ)E5u{ z?4F0B*Ac#1>EOOUloQvA6L(p_6u`>L@Jfzc@7nUI$76>>tEG&+IgGkHU!gfLp)eUR z=#Z%UL-wF6l)AZf{O@FrN)9s*CJHMoEUfkRs{37RAbxuZ^tXgcg|y6fy{ogN1Qxi$ z_QeIgnilmJ9|VxJXq1-4OvL`aO0j${oB2xoa5b;bOhsv)nws<^F#;Ve!|>SaQMNA= zN;@CmU(sXiwBD55Zf{AUIDz4Hot{s5Jitkk+Ap+GWpH6UWZ0mA(c4hZr~lf2VboW& zECX7G@z5-!S%M^GWkvmg{)+hX2MeTFR+ZcUp;BJ;D>XSdZ22NAI=V>mj{$_=KSVEO zWmLG|$@F1VQu-61ziS=eW&3RvJakXHjsBZU7>8jiO>9!`MyPt23z;)ARvnJ-Qdho|X8Rk?ncj*HEg5L2B z$+K^LBXfoM3SVDW#AXusFbypPD0heHXu67cf4My!lVHV5b$aSKUEo8*Yyx(c`cY3^ z715YmiwJD^w1T_Neqei-wHwdiwtMWU@YQDZX@X2HQVFS(s1Ml-3x(T-eNggnRm*$K z_;=gJa}WV{JG-aPoESfvPORC0Fnu7wy2Rv3B!Ra4 z%fXH7Bt%5?6u-}Jco6ct_`q&>pE2(9S&B5XHefOK=zSn>(Gq`^WT%VU0;7ie0VupJ zax=&99Cg6y`KrG4v<+QwOq?gaN5!`54~dUvRys<@!Sv?X6{z5=_`MQwDLsF))y-?A zy`BfC!Mfp`KyIPaAm%)#OqH4N_3o+YAhqKkXX69`i{Rjc(-yv$%FR|qNM58H2!RJq z>R=ZsAqO1uMQByX@P0I#;9!wTZtMF}syZHFf|4o(ND$iZcDzq!O4s*_QRASM5P^bv z5H`=`iD_)|pjf~|8A?IED!=;0X%ny7{F8oZOGt|13L)}XqTC&0fY##hY@pRns5-kT z99^IpIxwsu^A>M&?|xj`eb+<(=`F*io$~rH->2;sO`?XTzb@*kDf)o*=k z%|Qo_o!H_m$U%X!ZOrbkwJ0?C-U@`i;;A2fk+p50SeLiydbhzv-}7{S&r!j22ifB1 zJ<8Ouc8PFDvA;FVwa26oNh`!|b;J1-SDVIs7H@xR(IOwHu%rs~3WjW-vtDhdf7@LC zawJo}c~~=x4|CY6ep~4?JU36MO`u;IXI4vw1}R^*D^IgKxK67ilVEJcM4*;&`(Cw~ zAO5Y~gI@irI3Ni01>W4`>Stdiq*%S2m8?mjWa{VJMHfFy5{LaowXH97OU1J*av%)^ zD3;KTj0rnfLF;EVNbvg&Z|k#lQ`Vk_Ct};hnnhvyCk6iwzH2;@O-Lf{uNlt!%;!4n zNeWPso!1fms%N`#f8NRKhjzmeB@C{N2?909@3>DxIG$SG${H&i^sLb29=uU~+1|Tk zWAL}5G2a-KpwPt7&8>MXilB~0PF6NDBqT)7BDVk^%Rrn#PG0}}4AE?q*7l(~9;Qf2 z&517d%$6xZ&a9W&5+H&D|yZ z3Zf3El9Q|Krip+RH)t*r+s>GJ*EHz&-;_AoQesWIZxn{jAp?fg!()`n=;TMSWuW*G zIF7}|W##~2Ep>Nyzh`BVR923QO_pbtqLGvL0W4k~JRx545cs%PiB60Z6jv8Uq?XbT zqpQB2n-a(eR(8};g|%i4C3FJn(gSJVoloSOk=vQuQIX)UJ-iQmWx4MQ8V2OU)MQjN 
zzALUG%u?aytL2hPD@Q|wwP72L$JBvur_?kww~voqPDjyo6<@_vi!|@FFt01YsS@({ zQ9vN8r79N}mjU#1C1X?5;Gm%Qhb04Ya&jCNWfl_?^nVplnk3|U;zj5hYPi_ga%pT9 zA8*f8aN4ezL=+Sh92^`xG-Y`W4frUMzDZRD1@khU8tIId#svs3xY)S3^_p~gdV1sC z1Ot~F?Y{qZV63dpfHBf*btDJC10Q>1*)D7JJO!AR(2QX}EYK7e7Z@Pji<8sdU|>W2M1vg@s3lM*7e{4(05XCw$vAb1l$t_ z3hMoKj1g#s%W4`~fM3wiaKGAUg@%SU6hqi;I1s+m7b4*EhSexCCLWt5W@xxuqLiOZ zr)6klln@)ceQ-cdO4^Ese|CDB#p}kv#>NKNvt09&R=t9XhDI1=rVgb*qQ+v50y?ng z=&oA7^Vjq3L6`TdYgk`XbMw*NvrdyWxBWKYe>W@jW=hik**3Y}<-PXz?~kP_Jz$9B zq@+?=Or%p@zpOReDM(ARu&_9~x_+FNS8sNB1lHx5kV?AkU92@%RaNZ`MyY6MU~bK+ zC@LQ3bq@>=9|N6l&A|(t^4AZn`}4z74uPTJcXAZbMi3gMJQG$VaD(@oAwmFZ?#4Ok z;d63w62x#i9gZ)QYs6qNlQ1)*1c?I=UM1)1`nW%ma5Rz5>9{}q`}gl8T8(%zNBPIc z+}zwA)B3)?J|{=V@reny^JOKqpfl{=pYVv6ngAu29T!0Sp!Mpn@Nk(o4LNKQsZ zr`uvbmP`)}f$zr~w{#3(kzN>pJf#vSyk9SxjRqs7Bqfg@SzUlmyxi*ke7sl#V6jLD z;(tC`>xK&;_BLpZDqUuF_NdTMF4yzrgRzvx#zxO!B(qx0sceDI$2Ggojt=n{g3PR} z=n+5Q2Jtcu4mSXXE|N<(H8p*Fd%h#!LdXgT=N1->2}xIS6j z^767!AT$&d)Wt@7O5e`$$;rdAA{Q4I85x=EAEuvx*Uc3xFw)WW-vci?rG zTUuJi5DMtmo65Q2e4FJjQ9TWU$CS_D>e;b}M!-SG#f^{(0IV^{T3^@o^n7(WTddTq zC843Q1V#ue;TM3&aR~{l^Ye`?ELxq8gaxqNl z>|tO$`+9qUeTgWXr1bO%0H=z_M&F-Ns}yN!X#xGBAR+<*F-yF>vI6Wdva-$%4pQ*) z;z9r`^vd{JXub+`_815ZN3?+$*u$WMF?|CZ?Eh|!|I^g zu7vrM^2=UdUrVa!b%B&piCje&O&M_a!b%?ZnYnyKXtI>MtM2>A&i=G2`Jz)KhZSFi zW$zC*hl|xlz?|hMJi#$9OBaf2j2@}BZRVat8ik@$){UnjHdry3W2k`iT`$ES%Z!LV zEBc+Qg`N^XGYdMa=W-uIV> zwH61X1iO?km-xEIh4e^*97tXpvxD)jG3Vqa-S3v`5hXHb6{@9uh!jhmD?TIgqCfM!2up-wpm1)Z)eDBma;rL}k??Xc) zU+Q8aWJ@Rh+><^I1}0x3-8p$}fwQ~~27ctNR?u(>h^S4R-s{wM_lp%QriL>y?229N z1>99p9ICumW!hi9>KDN&a6Ohzm7SiAHH_px6{xwn;Ip!_0@1NV+H7QGaf3SJ?j{`l z!3T_$fV%g)4t?vFSGcnDnQ{_hmbQ<928pLSU@s!{+VIV6?&MY6^6zZTEXUbGpP=OL$*i09%-u)fpS#CE5w&9;hY&oj287If!m)nt-N1CQR*+8m;0T-1?-Dtjp=amRrvH0ja?eF<@06&>3Q z(N(CeHY3T^Hu|)IbLXYS`QXlvu6K*{2TScI#vx#N0sHa#cyY1u7Y3XiWf|z9?NeP0 zm;5+V>{V6r_nt@G>Z1AJvY-G&qfR( z46Kl#q6wzwzL57mJ@28D@Ve(WFX}i)Y*477?U35cA>H$ecL@O*;iIO-mGA_tUWrWY zGC>EY{{hFs0WHhXmh@J-b+X~y>o^Krxt~x{wRc&WNCJd^&R4;#+BfTI72K2&hTM0O zz`K;7ET>!!KHf|!N*T>ch5SYSLgj^d8Uwf>khGpgbhgh6H2}59$;IU4DiMAuK}R(z zf5*0`8vJ=-Z|T91%^2WlF3zZkk8A%cyKjC(er4|W(iF?Phkjo=Hw~BoZi#wlt~xwU z6rAt}E)jH-RJEz|XPz4=z&-s3Y=D)BDRo?7Qy=?Ha1J5MY4BDx909&cYM6+GO!BD{_<@e1e!kjxVZY`ap?a0;Bkz=^n#&RAJIo* zBWVf`p7DwQCBM)xMtOH6fl}b(-yWgw+fQ@zWsc6As}4WDDq#b7c4}0quI{;9vz&$W z)e8MYA=&^R>^?phX{HmrwS^j(vqrCHM_k-=;HBw0Ue_H3-4qr;0?Azs>qPk^r7l|( zG^fzMF1H1#SJf%BUI+(A|LQ~%{-E~~Xupj2db&rD{!!U3vA%ZF91`Fqq^)fV<|`lr zPe%^}{)Gyp_=gsC;Z$^PHn8{WjmEztOwW(}!*lC_XA8vkXKMF`5>;$uxUthgO5-my ze&T49CQ>C!(ynp&^=eU9;a9EX6N|ygh-y4~L>1mN=-<$XEfq`kRD*$coSE4X08=Y; zTkG(Djj(iak9gCL@0yule19a+mtrLpNwJh?RGL+KqDKPWxr!p+JaoscT~pZFnp(8< z){eiy`b%SSwyou8)VKL+0T|=vbb%Hi`l3sU2O-wXg;3Dz&|&Yy8vP5I&56+WMV4r( z8qTLAe5RWd-piYz4R|o{9RIiuY##jeglM5!b#9X`xSGl>SRs%<1qree2$p|2y6(42 zxiugkv%;M%5WIITp9{yW5aA3A=nc>d|87RN&6a?NE|W|xy4gZIsrvKYM!zwM90KI$ z=A0%|6Ync&KmCBL5X2`BACQJ?$~OEf)Dn)8g%AX?B(WRl{W~HX#6a^qCT8kvsT#l> z?_YlT)5v8*H+Mbx&X1Xt@&(No=qvu4b~+^4Suzuu(uFlylVE7Pjk2hEisTL@4*m5P zgE9D(hJ*#5P6DS%tGdYLbN2Y`>}s=fZD}cG?7(L=ISuJN1d4nBi!JnipFzXAiG-gM z|2XnP@bni{&=ab_+J!_O(TBRO{yXgNHJ9d6__>z2ogUw+EMdxW7WI&YmTaA9ZPPiD!(pOn|$RmqBW^uyx$uisoXEL(y?goL(QURgYm=ByStvfLS zSVr^LRq#q)8-endybD)2M=!a-q-$+hH#axR%F1A1U>A!KRNGFZ!c+g~o9EPHYd7%q zFh7g#%OqP=IHi9R&0enCaWQR+bf)3$FL89HE$WeAaoT2-mRbO8DCN43HA{^gy6TIP zjvkZ2n_K^0$sF)nZ>&`4Ce~I`tVFcF1NU~nzF_!o$jw)xNISNdHN@B4Ti$mKs=KYZ z5;@;bPph$3Zyn3LVcPM-Z1qOGjlo2f`uR1S^jNgi-1a*HAMzKqcl%_?N3)!t-D&gD?n&x;o0ij+MvOfY z!Xg+78#Jn2Pd_}6r%K-eI@2fIq`5VxuGf_tF+XmEWi_z)Wn=2K!vFo>_L`O=8TitN z1#fV0aJ}io?Zd->qBFgImzRaHaf97f7Xb2&*t+OkU0s14@5giOU~oGD63IP0E_*>K 
zsgaEhe36?0tK~0AihmJvca!h{n6QzE)IJ{A=#z00aPw#R}N~oHnEa zsQv;J`D}hYUER0t&vzsdVw6Qv@`IBTkHc|n-5pE-d*kc?&=SD4=Ns*u!NS6t4d!t` zj94g>!t_5b@0XUA#_M+J1wnoAf_4PROX=F+4Rjq~2s4u(9&r(}DUejIyN3;_Dvd=y2S4h~m)n?R|8A zhlYwjN)@RZ>g)4eSICh)14!vKYXq%fPESvTbM1Qqpq9;OxZQvKs#`gKiALeiwlg!6 zD4t_xnIH-zHs{;i+=PaP)@5jJYvbYN^&E~RetCJJ`vy-+S{p4`JlCpQRaRD3S67#r zsV5{P^y3GSxVR(S=;WmQ_g+RCno%GIotZ(QZ-=s1tKrx=JcI@Tv9d7>HmJ9+kC`HV zyvb&*|JTUqD7YYS>Y!*|O=F|?R`l%bEE7dbtfoZo(H+9qul+GLHa4|ea=j>ULC!p# zH8sef`-g{TFMi;JZF?@j^>&B;XI92&_WN?m2FI~;TT3o|S4E$nB$q19!rUC%L%gOY zow~OOE|eug{MzW@jr+jB0IK^BaQ`2Z8Qj|1+Gdj(apTi7<_(NoalT4QGh-=?;(=;&yG-&m?PNWqKR-{1d=fPjI4A@EsMUtiD4!a{bm6|BqPHj%~-0sWN=;0^X+ z3knMQe=tqg=>$)h8o(h=y5B)76lWj9eueIvn{&F}?#Zr%H+;I>h>MH)EsXbLO&j1S zwCHpc6cpfsW>;RNWn|VmJ>A|CDD&!ABE4v5&Ne&`Be8)ABr#_Dw%=_rcSxzQ`=w4B zK`bz*WJz-5=WGK+=_pC5V{Rl>dr_VBOr9740YPTx(D3kZrA{{{9DEsuvwxm^Y$>~+ zFS~Wuby?wPkn+KEfihTBQN7bqbx~1KW^+4`^Y|~UCX=g5UMeFl_s^i2Jf0CoSx}V@ zyOqjoF1Q7EaypF0?>4uv$Tj(t0G7NZgQ?LGv}6?+b=sZp_xNB+^a-E3hZ{0xM=L-K zeKroi_D?M0TYy*fsD7X2ej@X=O?~h!cx|!@`tytD(#8J0}kY@ zaim9jEQ1GW%BQNF7&F(uypE2y6a}m=VG-Hc*+jg>k){zo8xF-rCuRD9v6$*GT(7hUE2xP|SOkzA-JE~EFT}wTF0=yQl0Gs0R426+K3Zm4r z{2Wm$>sY$(7u8o)RUP82g@{3^lU6&0U>?8d)=yF@QN?u!c*NOR@K zAux0E1WKFa|8uvcMA(7te$A2A$DC@+USBL)RAeI%|9*N+FCofbrdR;+CFIUWdrqQ| zP)fRub+S}%w$8}Vu*^h_nVzRSN>Hi1tU?lCVIrdh!aQY%b6xR?6Yv`Q@QDS04`Xp~ zOaxH>rv4-P#ph$tFYVYu{%y7Q(f6*XSP0}nSO83ClIuP@&V-z;xKA~_N&nm37z2vTg1c7Tip_p3w1 zXQTfHYya0|NHxw`%;di%iwJgByKQv8d^UJ7|C<5_wotLBuNi#nRO^o8%-kl_^1)EM?+RuXHp_ zS2Ky>bm6ZE+ODreflPJhPooHBuB&jSdwf;~mN-Oe96>F;x*{q#M4Ju11 zDIf(I?j!NTZ4_tg5(*p(^N?{w+}yF=cdBi$pfZz_a?bj$+5E@6QI#AT z#yywi(&_B{AQrR?QcRV==C?+^R^>P5>~{vVECYg19bxVw5eWla>{j_rbJ_JLB;Cgv zTlzmC>j^G)R)1nvSlq77^-N0WWDf^FCVkMAU_m*2KToqpUZB%%X&hM&$b+b7VqEKt z2vekEbUSxLSJGW*88%BGK~%GZ-c~6ln;-A+SBZudYYSM)>D}%fmFKtuS2^9YRzrUJ z-DoOXsyHkj>uw1M*xGfGCSnVH4P`a%idVj7Q|yPQkpcfl8ox(@Jq7Im~k1E+~l z|62>7cNuzA3+lkRS!9Bd3hQ`79Q+Y9FyWpc8R?pF4kRHgj^k zlsVmhS#O_6K{jydzC66DH(Rt9Q$fhB_^0LU?{j5weEIk}Vg(_);x|x%=gC=`N>B=- zu72lPyqjip*~4$WDrn;pAFRxe=rSWCGTP06Xszr`46Wu?t`Q|(YBrY7Rn1F;Bil}c z?rH_QB`t|{1mjku<_oxw=N{g^kvWKO*d@XV;MA(78}3SEHpa$U;c!tle}o^f1vS5RGKMPbEydG5yiw3+$adS4$$6P^3UEKDJA;9A zy~_LFRt)J>KFl>&yWd8mip1ZEDWU$3_fFUB%MoFwTi`1|snv5PC88yzZb-h$d)!v^ z1os}vxCQZ5K>F>U3Ey5kkg99;(|c+H^X!>{gNM2Um58S?pvw`-=`Yht!}ZZf3@x;# z&T*_k2de)q8GM@aZJ%e*G3*6j@#*6%gwi;C&ybxO&AkDyQ1N-sL7@aH92gN#5@q}G z*RjKLr?-S%3TGqmcihahK9;op#C8-trP3ci?8u%d=ytH7+awz6!d6*b^S!McP4D%u zA6=X`EHZ96lKLncj@woVy;Z}q@7jd5rBuQop?Z+&M^D~|&f+ZvozjBDSMDGYc^;2! z{j79m4}LSo8=dhqmS+`rl@Q_LT6nR+i@QjRe*s8ih*)?P278&tKau_*61n^^apft( zF883BH#VHkF0%WVuZk7&yG2VIzbkyuv*SpMfBvL{mxCvpI@my=L9FK>gOFs#RZK{HQV$Gqy`cM1C_vbC#2<@?HkjSTl8RQKLaUZl)=^ws!n<@cyu(yHwXh+0pin6T`A)y#|l`o7J&lvOG2C2jMx!GrE6#c8JORy=m05 z_FdoSYAlOBWuhc5E5cd0{I7!6EOoI8^91Fu-n7v)Dp9YKf<(Hj=;gfa3To;|}3hn8-ueP+b#70pRG1*N>bCFisL zp?5tBU3vojW}U}^n%ZDV9}^Y+r*FrkGlihp32UXc+lehCC<6roR*fpQdwaM|wh#0I z+Ik;>H}iedF&hD2s{s$kY(_fBI>gQ^Maj1t)lvT%I-TM#zKO$x5e)I1tOh;GBRlI| z*G)lA1M#D`Zr#8xxbM-z}NTyW7{0F}SH1snM@l6{w}h zOelPgye-OK%~K9o2Lj(*@wz<9FqRA*84VT4Q|;!*z7LHIkR5;Dhf`^?<;LI}n;q!& z!XJwVR}<^)y)Q5`)*{-q^SRR*SkwyStR|GAp_V-iW;7G9oH${9(01B^F2 zE%03M60lcCeCAtEH&|;uq-a8oq1M`aeuQLDChRg=>I5${Seec==$6-!$V5r9pPgA8 z53f_TnCB}T|9uIm3^t-iM>KOcB)*1bVyWJ=_)w^D5P3d7LD3z*&OLW6Pb`ARwf^Mf z1PFmDX0i3?tAm6-L<_=aO|}Nt+uf=mQute4FIE9?E$S>0pZHb$pGtajt1d+2X$0n^ z@6LB2azIhlHeQM^;ztDgU*-cg00?LP*XF;JLI9EE5D~SAZ)h2dvKHd$jKxjmNqd|+O__E$i`RaO&~uf zr9;4BjTI!^1M5q&v$F&AkHH($^&=_pG$~*})cC(tWkO@Nywz3zpa(!OC{c6f67t017!;f?ibTKaTPd#BR#iA_M&qo1{! 
zF!?#96;gQJbhHSsiuO?DJnsDhA3gDSgdd2;NOlTs!-oSFzSu>Tp?J&yQEeF_5QCFXoancU|omIPOj4|%wt)!bG444qbs z=bma!B5kvC(t46#y}_ybvcrk_|Lo@ackp_M3S=DmW>wR99GY{21S-)bF7ah>pINKh z4hR`rTH{LxjF;$p1+&|y^4*h`Dj8-^(KlR~iQ=f(J`-aK$G-lP&H`jy>(+;_huDF!YbDkxQ zQyqfAe7E@fioya{ewhv?^0TCF2&9jz36-LyRBZl|4c zuhl?ztwYT&Wo8D>wV4=X?0ui$DZU=~a%HK;d^_AfW9JlV>6;VljljR@=uGnV*wvsX zR1HoNyd4DzI$NRWsp)&V#>Rp`{G4Kt#C~|;- zuKCIe28!Qv4-(|~$(*iB)*kNp1HtV3+FV7eBHOpk5Pc^;$hjDJIB8PfMYAclFJ+Az zDPXJ>e0^WPyKWucR&h}`HnOb}nWUhUM7TyWi6g1S`8V{NfrE=O6UViJswaaS!ThG= zP{CF*1`_S+QV%bvwJ|i8WXpH>yoGW1Y*!+LX9&*>7t*u;e1*KOA_zO~o^hK5;dp0U z1-Ay=y^@rJZL=Q@)~{Y(X=F@ent+HxyOMJRruyVlglB$K>W#?qVWm+&hLQ((Xb8Lc z7;y`R&={oj{N%Jk&`ZP{_0V$bo$@=8;O>s0OxS$7i5-@`=_9%9ezL+nx%$_kKNcwL zWBT}#Gyd&mO77vdbq%BK&?PnSs~_b0zZB5l&S$1})XfcUY%TC>n&@;GV>I3(?F4$R z>PljU_NCdaF@>FpPY*{u2#!>xGjo3?i^TONxbvBBT#F4SG-np|E^67yCfz2TP!g0-O4>thUjFT`oY_WCW!CjK(n zMG#!#kW(utcElqV8d7u}76X+?_E#JQ`06n8eNh`)0+NS5)7=xE{?gX6I~5Mfv5Tm2 zQBcU5d07YDd-=rWdvAd77|8E)iDauxE?4L*qXNO)MGbN#MtILMeJ7wKcKFRyI{!i0 z72m(D#6$%;y73eW;|5P3!QO6jk|J&dlz}@XIDF6r#*{ibZ;>bE=@h83isP%X@se%H zJs{=olk^GU&vaAr`QVuH+$31GGsM5hOdv@L22zX+ncf_@s{_?#*L7CtA%4d+32EWw zd6DRV1eMj6%AY1}w|Ie&lTw6CXkw6J@y8MA(eSe6l2Y>1BCk7$7!EUr{h$yT%$^Gw zSKtSK5rm@PGGCNOeT)swV&FEY|KoJym3v7}&=}abI&RoubVz`S?0S59AFR_bI;uH< z$;_I(i=j_=+c#YOs+Q#cTS7^gB*{r2iB7^lqdZp^nS6*G9o(khTzv0D1OKEwk$0!! z=ru{*ea)(``|_28B`FOFKeLn(_jyXohuV)yL9-~4d>9>EG>d11*5{mEH%eH^Z1S;^ z{$o%TIy`mg@+vb}IoDr@ou&lU>cD4e7@ zAX8jwu%2!*et@Cs7&_>L?Mf};d!sIbnWPDwf_l zW(AsBTL_=jMYlNL`w66>QNu^6=i9~v>TuM=iO|8!6-*8IVSOV_A~U8w57n=mA;5se z1$TcVn{O;uc)?J9oj{99CLh${GhU8!e6u7cWbwsqYFjZz3XB?*bL7J^pYIMaHb2{L zEMp(*G}lSaGTx{Wkm1hI_rm*Dwd;Gqs9ybzRM>3#pd}1SFUQLGpK%e7YX8<=p@VS3 zD1O(cZ2`}3{pfuM!tQeLqKi-pyxiFAL`XKqGy}uUfs~j?X zEDOdRN7w|4`_!>hGP`pZZZHCqsBpcy;T?J$o5(0p zO5!lIohHpu+!yNK>w;WFqHMtq3h>mSKEmf?JcIzb++R*_iPxB=vzqDEe+|9E*}NxM zBQnuIEwR$(xHnqC5)2}`pMuC8`J2|XKC+qf{WS9Pgi8o&d1Z}5>G7|o&tGzn+$Z14 z8Jeo6ufF60`r2tkH$^f#(jif9{zN6{b&{HHQ?(s}nqv9kj2&tS$Ceo2#M38jE*iel z$+Ys0D)6Gc8N+<}utB-bJbsd@;^xfbF4SaNFB)T+$0K;Q*dS1l_*FU4_+x?#5 z1&IXfZ0>$1*hLSU<=77LrtVc{>!5G6E#@$TjZCpW`mhh){p-jui4$eBU8d{->ZeJ) z9i<=MM%co1SRPgB6<2p{{oA zp0nDhSu&3=IhK^sxXj#~c14X7wCDnheyI+=_}HzsQCO0K&kWijMNOb4W58Wj71KiW ziDC+pl~v@Pq$?easO|+3AfCo?=Jh`@mA*ijvuJ_WeN!neWEDLtJm&#-Q5u6jOsrk4 z`}=$+`TgfXzjDKyWgS0tz=V$`gVp6hfNs%FXzf--%Yr z32)29mNq)y{mV(*P}V_16*G^DSDQB~vOR9_l-bFn6P6jY!U|Zk=zb5p)H z6yZmwK-^ez``RF+2-^N^{9f{D*Ok9du19E-+@}afRtq7#$!v{}-9}gZGx5G2CVF9zu3WqK);;$w^xd5ge*w%Prh%{hN2UDBSKrSezqv-EUY7b2Hu_Q2N0%+Uv)fx)wfD~KKjXwT2{lDfItZfun}U0|6tBl zDc(l9YcnVoRF6l4Rz`9|FzXKS3K{qg*uR1bG+Dd9>8%}pYEq5*9KT8o$6Clcmlmnn z{%(SOIZYfAV!yI^)Z3{BwIS}ofJ!?$@bEI|LC*N5nhCUHV_=%m-zJLk+9IlBjx}Pw zeXK2%2<8b3?eaageUb0(c3jyTQik-4lTX0TZC`X^84a58`N(BQoZRiC<^9KRI5ltP zg@)c{h( z{%5ajtCw|iO=S1EaPOoH9ouH_hWGNQ(MuH5v?OvlOK~CmZ^2dl0y~nI&eK2%J*igZ z>uyLSF6ZU-P9Hu4UiK6rhqb1&vomY=V{$q(NNbHz>+S7%a0dh`@6)b*F4$;NCvX@* z&>!QmEN0m;7m`ab80F#CQmc%4{=?17@SXnk%1CUdy*T|Wex!_TZQ#5i{@c^L*Pgw7 z`)nE2Z^q)ZiVl(aOIk+zX#ch6jwTpZGBRU8JX%;-h)*>Q==?$uZ~;Z(;=M#(H)|XW zQx>6;TDl`CJ4kg zmwW7kjArj~xv{*h3s1pyr5(ydt$8oqV7N{oEHZEs(Vtp$3RzN^t{sMsQ53(E)7hEx zA3mHs3~7!Tai6Yd`bHz7$8?*b>lBLQ66rBdj^J|2|26Das5x&^=wkTwt>(@;B{RMj zm-yGn=76noXjWsepXls)8gW3n0L*u~KB8Qc=GxOW=4YM(zpTejvmX=`11UjIOU+<* z=^rZY3fdxqBo;p>kkk0t&EMVP$gxTYtX6`EFG&&1EpN$C!-N-%mwBG`Jy&|?^ARWw zFgMpO40AyY#VkZ!RdVy@=2cL#oOi^%^2oz(ETcaw;9qa{*Z<_FYR~{&1Er z7By17+8rOXQ%i=fEE0F1k#(SkzN~WbPW?bA+0}b?nlyegT{0W~y4;eo<20>Y>mSzc z<)(JOdC#Zk75?js@Skx7bXbc+AdzaojLu?<_vn0U&hd;}g+1GE3BfXc0HZZ?`7y=( zm#@|6_vNwgsq^(ih|njiU-+%0{#;Ze5{1E()Ha3|7x!bnVq&U_84C^SGZZL~_dM3Q 
z_=>ac#e$`K<>2SPSbXg@LgDn*sd~!3?oMF()0%1ock_3Sivh$|7jGFP8y89h^K zKvTW%@?O4={8E0`@SO2ozMB7mSjxaB*4h8Fm7j^~n1s@+n-8H5ja$y3%d5szat_`> zQMS{K2ddg~!O}yLv(XxC$nB0|%7pbhVsYFnSKAspJJgayLN)FRHLjhc}SlXOg`W8n@6XDt|jtbffgpgJtSX^OeC~Mcm*X!j}6ixKzqE{)^59F9C+~- zBsU@PGLuF;LEkS_dwqlNF~bU~{>_t7bsO@K5i;b;k1jy=M1u}HDp<|D#IS#NOP!2B zVL*6#ut%_8H@lPVb|U<+pn-3p3?j*`(XMyZw~oylEl?ES@8A{NuVpy!203Lk;>uL&1-+ksx(SJz|ljdY+D7!j4FDsb%gAKL% z?c)sNMuc4HS8@(CPUfn{e+WL)qB8ScK8;^e#-3746w7ty%Uw;RwKKf} zo>{jqZk8Hp+p4`Y2F{s)*zl)xPK4{PKQV4-a|9}MUS1NxU0V!$FCKS;Z<{}Y9-vcH z^qj+gh4nq_QVStNy;<3g?@IV`y$IO884*@u&(i%j1_=)vKY=WY3VnN11$!zfjG~DU z89Ikc!hYb$QqYq^aDrK@&Ikk*E2fRZtQQsz%(NM7z6M-#lSsytOqDUyYXs#T2H(R| z2kXd|5mAviOupqX+YA^g?6cekWvr>qOQ6rLm20I%aqy&)lcB_Ib$lKHKd#gtD>Aov-3j+fdbaM{AZLoi$ z(bBVsx_kD|RE<6$?4Qh1xScZ2gL%#)A-JiNjpm9))U39$gI}uoh)TfxeDs?|UDmaE z6fGYnEPZi7$G0l;GB|O*boLGoE?Aa_*dzg6Dw)A-8^)G^u?eu%HrJDDy8 z>B9MEv%_Ljb{Tw56l~h_p69cCpU~A7O&Aq?OI!7`R&yrAU`*yZ)B3cRr$1FaTo&8R z=(^Jhf%yIv0QHIdgpLphr^c;GNrK{y!km)YEC=r~Vl zmOCwb!tvd5PCJFspM5$@Q+Lj+zUHH5o;HOZ7en84VHzr4ryoia@7n>TL@RP?Y5&{m zo%3+2DFm&}$pi1*Xn>z7^fkuj;~Vvg(-k<5eDDmn^Ab7YD*yAZmB0mq)(}jzjW~wJ ze*f)`wa7{bHGF=t&fjw;$z0|>6R{2XD)>)@&O;=<T4M}xdGZNc@kdU_KJ$}>FJuj=8F7$5sCzZuKt;3{J((KMhja;5KGc(LoR3&}91fDp6u9z%NGJ!%)cnlh(sG_1%XFQU{=LvKP z6E{yXK!g8RLIc#5WH!8)subhk;P`yLKVhA^s%UAQ92^W7*8Y(-4YBhY2*>d7@PI)g z=;vz2^%tyhyV?WPZUvEB>2Z*i=!<}Ivh46S?$ET*ICMKf9&-%Mv z>^3{NDJV>VivZHSzMAGf#BYrHKoxSU=|tM(a#wix=lo7rJtPqEmd|f(gH>}mx+@T(7?1S zDJcO`5ZxwgEwd|Tpekmq$!1rj8YmtD1ZRoph+{UjV8UBLTPg}m_rKL82&TRXy z{M*;Xs8CP*=pe0J~XSQ!~A^#0gYT2@sl9+0X-uAmDGS#fp>VTJI=8 zhUD$-ZP4Maq^%tuXGiY+`k2Y@{nFuaM|KoTHd%MJ^FQ=N@gCMrN0UIwi(Z{^7$gC1 zm}uUN`LAEU3{F9SULKIlQ7bKN1J|6Ho?fmqiP*^LRon^5+OfcIx%5>Ma4RH^9(=r zn29kuI=UhwGaF8&%9Tw+ARs`&i3$d9gg&KBbni*E5e-V1sqTd+WGBYSKipVod-(^3DfwgeB`Igor7dJ#v!gfdP^C=!EQ4I0tK@SJ-^KrNy zdpwf*jDqa&L`U>*$n<%Cu~~1a9cdned_rWXn@xrxN{8vPwJ-`mi}wEOy3pE! 
zDYfI-7rgoKH}Ja=LF3l~i6W z7oV|L!-H*Kg?j_txN!nUcBkvoOgU8^UtSnAoDe{uBd3SzZw3Z(lu!6aZc$F}vE^Yb zT-jr1Jilr+=`=K!5NFGi%R>3oS6`gfq{j@XLti`}+<8(V_j}UO%1-s={;$;T2sJI6J|$tuojDyB?$1ZnuxC)|K<9j3&BWmCT2`&+hx5>If3pyz@h0o5Ek| zK3xv(7iW-h1SoMz*)|Sw)Qzc$KK+~S)3fhCeWI_-^n_KO3Jh{ePjTq zgWV%hma*a_jf`+tdTrV6C=Yv9?Y5%Kc;J3dyrP=gzBL{x1`rVGBF#`ly7VelP`Zc$hN5(&DZNJlLs3AaNkWk> zy#$DK3>~ERu5_dX=_2L7j^~_P#(lc`ArCVdYwxl4%2;#G_5Hpr%L#4L;i{Kji8uhn zhdlBW7?@itXh?hNle=Q~Om`fur%bhWgIT-D)0|9+swH-2`Nua;u4r}=<>y}}N~_{o zuudJnok9RAvdF#Bm(SgmQT4%L-_(~Ym_4EyQK~PV%3^kMW~NDCTbJl#&A{vdkQ!5D zHvU@Pg#tYIBXDl8733@YbSG1alix*WrOU6oD+Q~y2uKDWis1jIOx6Q6|L_+3xk-X^ zUCwATFij=ZNk57y`8ZPRY$tGVXB%PHnVBFa*m{>T4aH8>>P58f_<2L{B1^$OT6X?4(Lsz@JI|!{H#O6lAJ0c8ok?YLTPG4 z0ui>eZ%S~`=_NEam)3jGOUgY*k^;L7!Qsf2;n5oTry|;#DSPZHcOg9%e%rG|2~!4% zB^}8Kz5thU;|OiMs5(y}!NrFS2YLCF+LS~=A`qXp9gLE$lL>ci$iW-(UUPci-t0j0 z$i4n0E7F2}`{Bu4gM8HECCzjy#k`d@VlHOZ?VkU3(@dvkg4Z1$kDK&-&FQ;XEHj8ivRKdkF`msSiImv5l4{R%jh)yvxJhqZ^ z*nz9IV_+h`&8zir`n`>Vie@NpiXhb}#fvL{0}Ar@g|^~jqs?YL@Y?BmJhX2%!*Slx zXG~_d!1-}j<`$e)*yig;B>&Fm@e>j@CR6xyVR5`6I}vYv)xeC>@xe>qXDUs#+TVd` z!|k2mhLjESdhw&dyl?NCEBFosp=Jb-^R-LqqwKy;@%xsPqe9p5p9|d;wH|>g{M54e!^07m6n!%Lw`})@R=QES*7cn>)0m?@ssOa1lQpo5 z_{Ng5OkKFhM$POl-}2yT^ia^pWh5I#Ico+A7ycCrd5P+PmkB=?d60hNN2`hVtxaOU z2)I`NoOa{WGJyKbMeK+W^7yYxyTxp;W72H}L&`Tf5+DC+6B}E{F}F~u%6}V$K$;4| zI^0W{?MeEd9(y7t>9@BYNO=qW#LNV8lFhq}M9c7K)(JEOn0mUs28qkUR>2D)YK~sV zvQ5I}sm$4Hi-!x@Q(lCIkLx8n@DIIJX99#_H-dI6zK>@jz3%^L=Vc)!y*xYir0_jE z?BvdAq>J;>!sRwr@wzw4wC0B&V}I-H*S1v$+NFnUV3`)jxa*m=v%ZJL$nFQzhgHZP zDljr`f0-%Gf&LON!9#1rid?Xbr{kk9z5cA{w+S;u@h=cUSn=$bPOI{SywiM9`+dUh z!M-9^UBm1SyUVPN3pQ%&QJc+QIl?l+0*aSw{`A>=pzG>Il`Wy8igEH3CXU%9YTOQ2 zW$_L0{&@fNl!O@(3jSK++pbhFKF2rpSL3x3B$Ga`>TCFU*ylFPG?h;=Z#& z5cy{lmut^mUCS|;OXTFV3j2F|R+x!#o_9vT04S9Fs(wi#7K@FJ)_we#>5mw_sfk@q zjv3O`)kV*5M8(P)MLa_AxkU>N5-bbjFp z5YO|!H`W6>`SBL#D;wR;Zg})(Q}%`}wZ??!M*Imtn(l0oX|Cz2RDEgcS=`-gD2AE2C_1U{v!gn}{!m{5>V)6fL?`-36}@&strQ|fDhQ_}FT0ptUyJwRRx z#MWiFDODSr95Bg_jvFfjmmsNfJ_HbcdIylae=>9+>zbO9m6kRJI{N`>N*NFbJ_K1e)Brv(%riB46aOeY2&dr4WwQQBr!^ zjHCdTNNG-uAms&Z(i`EbGVZHX5?Hhl4;x$BKbW{$W`;Mq5TZ4#Iez+(Kko;w1h zU?8Ig3_h5(YZ8t}Cw>ehL~{X$rfIhWC|P7evS+)xH$eqO$s}e6Zlj^0k>-O3a{s@6 zVr)DCz9}u5873V83`1w9J*b4z5P0B|0EF$B($aAt(AN#Ca=%=sS?j*0547+_aBNp+ zXTVuK-_4szXf)exf1`W%?E60|8Ep%cTX!dbmm%+W;2+p{hVb)Z8yWU#zi<*>!Krfr z%19!5CB{Vc%d;Q@GlS%)JoblhzVgm1wCVE$PG2kt2YCp(-W2J)tJGr5tg~CHs98<= zN-ssL6dW;J6(St{iL>Pq#XBb6*QYVxcw8(D@7DZ2J^6mUOTJ~r3o-Y5nqh}Z8JGZo zCHwq%Z!Ic{(#6Fk&T{(#SVk!T^#3K;4&-0TVm1(+ThD?v8{D0Z_ibzkhz-BF?fDz#-ZC)qaQ091Tz~ypT`{ z4yPNJEsrM;}Ug6e8S+(4(NeUeh0xMIn!mHSWzP>}#62S~u3Mhz3O~i!@O~6@(Rd zYh5=N+L4AVmA^A-hG$>NHNPr)SYw;HI&`x@qK@?%Qy*rb{OTz-fSs<0&T#F67I|j) z@Ot=()gJdfLWNbY`%u_tM?q_+FzU!0OTcbVL@jvQZdq=d z9-G=&rGksL93G>u$k_Lc$78dh2(96Ff|F5Z=KWB;D5&X?t`GUyCA1 zaym-N%6>$y+;NG*_anln9)Eb*wEAIE%cx=dNJyqalygw&^RBQkfFx&JGw zA@1cRj!dcCp%Lck^XLQ*zXgilv0FnM)7`3G_ZAm^tac!zh2^jZYZ)FiVu>wq(OQ~W z*+qxobkRuP7-+WfF9Djb8kt}Peru@US=A*TbFUAG2`GvDg8KMCVesiq# zPGjRL`QW`3-JQ|Lq^va5YBhH%Y!?+vXd8t%y26S3;Glq5ZLvO!YN2XYPmk=^v)Ms) z(c^xSL`px>_dFQJVL0FF$kC`uAkD z>T3&KUkrCqzH^$0jVr=BkexT3$@v7VUhuh$e6jJNGPzUkFcLnj_65-wfpp?Q9-F}v zm!>1HHyR5k;~GXBX5XWhb~dJ%RERftrai~ky(Y(( zabJNmt7bKmdq4Zgr*Zzd)IqW}@S|~wDl9ik3HGRqG!a@R8Z7o1KYZU~srzKw@Y%6~ zcT@St1T~v2(}CYhWp3tA8QXj1*VcI58ci=Kd>A1g(+laeiaCI82E(ec+sD3?pIm3v& z_=n<*%L}OGZUIwfNx`-OVWlrr@kng-Ub3cB>VOLS|^_}`X24#tK+?G*0 zg6pt{=a6vVWDBJdu0J}Txn9~p1oLAg-@#uKFy(wQ`Lh4i`mUP&*em%7KeNt2sic*9 znO&tMmlag zMyam6*WcsTmOtF_>`s|76_R@3X;ndCT`k1{OeoGuy;B;OTW_-R4e45kjHfGxZ9Lt3 z|2l6U`ylj)L36BhIJ#M1ecIF0WV1+$JmcQ*oAD#HI>dF0{zF+Piy$SgDKgRdnP0W5 
ztWPTPhqW}6ivh}Ixs8+|8r)RSx*zwbK>T=0FVA57ms(zR^@QpyX~H`5`#@962a2Kb zvvv#*=<1h&bs6X?LjvyD*gTv9`%wuc)dhW^A3Y&@? ze_@GOZ2L}0X7V^w0IS#Ou?Mq7=;HkR;OJo7cbgaTVE|RBYU0k@gqA7MzF*PnxxTvE7!a{c0soE8(}BamxVhVS_CePbsFD3|D~O0tS27l-qtIyJ zoD?a3K|@=cf|BxJcQ>=y4tl2fcu2RXd zH*^yc6Wyw(U%)wav-Ou&R@mfyhi7I?`RPU1epa`?oi#%RdU`uJ6sW=mO1k8L+fbPq z)uU|vHTBl$>+1|2Ii7>Yu>#O3EiQJ}LcG)C zQLPUoO47(@ZSK{1mwR1e+}_(;J?QFEAa1_|LZ1`zb|j`l3udUlgt7Rs*rnk LbX1F#p9K8}rZ~M? literal 0 HcmV?d00001 diff --git a/docs/source/developer_guide/index.rst b/docs/source/developer_guide/index.rst new file mode 100644 index 0000000..0a52763 --- /dev/null +++ b/docs/source/developer_guide/index.rst @@ -0,0 +1,14 @@ +Developer Guide +=============== +This tutorial will give details about **HOW TO** questions, which mainly deal with add backends/algorithms/patterns to MQBench. For each topic, a quick start and a full workflow are provided. We introduce the basic structure of MQBench, and provide examples and guides about how to develop. + +.. toctree:: + :maxdepth: 1 + :titlesonly: + + backend/add_backend + backend/add_backends_in_3_steps + algorithm/add_qat + algorithm/add_ptq + pattern/add_pattern + diff --git a/docs/source/developer_guide/pattern/add_pattern.rst b/docs/source/developer_guide/pattern/add_pattern.rst new file mode 100644 index 0000000..a61d585 --- /dev/null +++ b/docs/source/developer_guide/pattern/add_pattern.rst @@ -0,0 +1,80 @@ +Deal with New Fusion Pattern +============================ + + +What is Pattern in Quantization +------------------------------- + +In quantization, there are **patterns** of fusion, which match certain pairs of float modules and turn them into **fused modules** [1]_. Normally, we apply certain patterns to our models like: + +1. Conv2d + BN2d + ReLU +2. Conv2d + ReLU +3. Conv2d + BN2d +4. Linear + ReLU + +After fusion, modules should be convert to QAT modules based on **mappings** to get the right gradients in finetune. It will map fused modules to **qat modules**. Fused modules have to give the right forward, while qat ones have to give the right forward and backward. Let's take a look at ConvBnReLU2d. We need to align its behavior to hardware, which merges BN into Conv2d. Fused module performs Conv2d, BN and ReLU in order, but the deployed module performs a Conv2d(fused) and ReLU. The quantization infomation should be about fused Conv2d rather than Conv2d and BN independently. Besides, the BN parameters should be updated. The code will be like: + +.. code-block:: python + :linenos: + + class FusedCBR2d(nn.Sequential): + def __init__(self, conv, bn, relu): + super().__init__(conv, bn, relu) + + class QATCBR2d(nn.Conv2d): + ... + def forward(self, x): + running_std = torch.sqrt(self.bn.running_var + self.bn.eps) + scale_factor = self.bn.weight / running_std + weight_shape = [1] * len(self.weight.shape) + weight_shape[0] = -1 + bias_shape = [1] * len(self.weight.shape) + bias_shape[1] = -1 + scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape)) + if self.bias is not None: + zero_bias = torch.zeros_like(self.bias) + else: + zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device) + conv = self._conv_forward(input, scaled_weight, zero_bias) + conv_orig = conv / scale_factor.reshape(bias_shape) + if self.bias is not None: + conv_orig = conv_orig + self.bias.reshape(bias_shape) + conv = self.bn(conv_orig) + return conv + +Relationship between qnn, qnni, qnnqat, qnniqat. 
+Relationship between qnn, qnni, qnnqat, qnniqat
+------------------------------------------------
+
+Feel free to treat MQBench as an extension pack of PyTorch: the leading letter 'q' stands for MQBench, and the rest mirrors the PyTorch namespace.
+
+1. ``nn``: float standalone modules.
+2. ``nni``: float combined modules, which can be quantized into a union later.
+3. ``nnqat``: quantized standalone modules.
+4. ``nniqat``: quantized combined modules.
+
+For developing new fusion patterns, we need to implement items 2-4 in MQBench.
+
+
+Add QAT Modules
+---------------------
+
+Of course, the very first step is to implement the standalone QAT modules such as Conv2d or Linear. This enables the quantization forward/backward simulation during training and inference. At ``mqbench.nn.qat.modules``, you can implement the needed QAT modules on top of their original functions by inserting fake quantize nodes for weight, bias, activation or anything else you want.
+
+Add Intrinsic Modules
+---------------------
+
+Intrinsic modules are closer to the deployed models, since they simulate the behavior of the platform by performing BN/ReLU merging and so on. An intrinsic module wraps its sub-modules.
+
+First, add a wrapper module inheriting from ``_FusedModule`` to ``mqbench.nn.intrinsic.modules.fused``. Actually, ``_FusedModule`` is an alias of ``nn.Sequential``, so it remains a float model and does not change the function.
+
+Then, we have to turn the fused float modules into quantized ones. At ``mqbench.nn.intrinsic.qat.modules``, implement the fused modules' QAT modules, which load the parameters from the float ones and perform the proper forward/backward (quantization and BN update) as on the target platform. To be compatible with torch's ``_fuse_fx`` API, we need to implement a classmethod ``from_float`` that loads all parameters from the float modules.
+
+
+Torch-related information
+-------------------------
+
+Torch defines its fusion patterns at [2]_, which can be applied in MQBench directly.
+
+
+.. [1] https://github.com/pytorch/pytorch/blob/9cb52327a867cfc7878caf639a31fa5c860803a6/torch/ao/quantization/pattern.md
+.. [2] https://github.com/pytorch/pytorch/blob/07932e27356c32df8a3c17361e4779c635d2d8ce/torch/ao/quantization/quantization_mappings.py#L36
diff --git a/docs/source/get_started/index.rst b/docs/source/get_started/index.rst
new file mode 100644
index 0000000..76da192
--- /dev/null
+++ b/docs/source/get_started/index.rst
@@ -0,0 +1,12 @@
+Get Started
+===========
+This tutorial walks through the whole process of doing quantization with MQBench, including:
+
+.. toctree::
+   :maxdepth: 1
+   :titlesonly:
+
+   setup
+   quick_start_academic
+   quick_start_deploy
+   support_matrix
\ No newline at end of file
diff --git a/docs/source/get_started/quick_start_academic.rst b/docs/source/get_started/quick_start_academic.rst
new file mode 100644
index 0000000..c326583
--- /dev/null
+++ b/docs/source/get_started/quick_start_academic.rst
@@ -0,0 +1,76 @@
+Quick Start -- Embrace Best Research Experience
+=================================================
+
+This page is for researchers **who want to validate their marvelous quantization ideas using MQBench**.
+If you want to get started with deployment using MQBench, check :doc:`quick_start_deploy`.
+
+MQBench is a benchmark, a framework and a handy tool for researchers. It is designed to be easy to use:
+for example, you can customize the Academic backend simply by providing an extra config dict to conduct any experiment.
+We provide step-by-step instructions and detailed comments below to help you finish deploying the **PyTorch ResNet18** model to a **custom Academic** backend.
+
+Before starting, you should install MQBench first. Now we start the tour.
+
+
+**1**. **To begin with, let's import MQBench and prepare the FP32 model.**
+
+.. code-block:: python
+
+    import torchvision.models as models                        # for example model
+    from mqbench.prepare_by_platform import prepare_by_platform # add quant nodes for specific Backend
+    from mqbench.prepare_by_platform import BackendType         # contains various Backends, including Academic
+    from mqbench.utils.state import enable_calibration          # turn on calibration algorithm, determine scale, zero_point, etc.
+    from mqbench.utils.state import enable_quantization         # turn on actual quantization, like FP32 -> INT8
+
+    model = models.__dict__["resnet18"](pretrained=True)        # use vision pre-defined model
+    model.eval()
+
+**2**. **Then we learn the extra configuration to customize the Academic backend.**
+
+You can also learn this section through the MQBench `source code `_.
+Learn which options you can choose in the config below through our :doc:`../user_guide/internal/learn_config`.
+
+.. code-block:: python
+
+    extra_config = {
+        'w_observer': MSEObserver,                              # custom weight observer
+        'a_observer': MSEObserver,                              # custom activation observer
+        'w_fakequantize': FixedFakeQuantize,                    # custom weight fake quantize function
+        'a_fakequantize': FixedFakeQuantize,                    # custom activation fake quantize function
+        'w_qscheme': {
+            'bit': 8,                                           # custom bitwidth for weight,
+            'symmetry': False,                                  # custom whether quant is symmetric for weight,
+            'per_channel': True,                                # custom whether quant is per-channel or per-tensor for weight,
+            'pot_scale': False,                                 # custom whether scale is power of two for weight.
+        },
+        'a_qscheme': {
+            'bit': 8,                                           # custom bitwidth for activation,
+            'symmetry': False,                                  # custom whether quant is symmetric for activation,
+            'per_channel': True,                                # custom whether quant is per-channel or per-tensor for activation,
+            'pot_scale': False,                                 # custom whether scale is power of two for activation.
+        }
+    }
+
+**3**. **The next step prepares to conduct the experiment, taking PTQ as an example.**
+
+.. code-block:: python
+
+    model = prepare_by_platform(model,
+        BackendType.Academic, extra_config)  #! 1. trace model and add quant nodes for model on the Academic backend
+
+    enable_calibration(model)                #! 2. turn on calibration, ready for gathering data
+
+    # calibration loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+    enable_quantization(model)               #! 3. turn on actual quantization, ready for simulating Backend inference
+
+    # evaluation loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+**You already know all the basics of validating your marvelous quantization idea with MQBench, congratulations!**
+
+Now you can follow our advanced :doc:`user guide <../user_guide/index>` and :doc:`developer guide <../developer_guide/index>` to learn more about MQBench.
diff --git a/docs/source/get_started/quick_start_deploy.rst b/docs/source/get_started/quick_start_deploy.rst
new file mode 100644
index 0000000..4eed855
--- /dev/null
+++ b/docs/source/get_started/quick_start_deploy.rst
@@ -0,0 +1,57 @@
+Quick Start -- Deploy Just in 4 Lines
+================================================
+
+This page is for engineers **who want to deploy models to the production environment using MQBench**.
+If you want to know how to do research with MQBench, check :doc:`quick_start_academic`.
+
+MQBench is a benchmark and framework for evaluating quantization algorithms under real-world hardware deployments.
+By using MQBench backend presets, you can do **hardware alignment** easily, which means **what you get from MQBench is what you put on your hardware**.
+
+Before learning the internals of MQBench, we provide a simple tutorial to help you start up your business quickly.
+MQBench is designed to be easy to use: for example, you can deploy your FP32 pre-trained model **JUST by inserting 4 lines** of code.
+We provide step-by-step instructions and detailed comments below to help you finish deploying the **PyTorch ResNet18** model to the **TensorRT** backend.
+
+Before starting, you should install MQBench first. Now we start the tour.
+
+**1**. **To begin with, let's import MQBench and prepare the FP32 model.**
+
+.. code-block:: python
+
+    import torchvision.models as models                        # for example model
+    from mqbench.prepare_by_platform import prepare_by_platform # add quant nodes for specific Backend
+    from mqbench.prepare_by_platform import BackendType         # contains various Backends, like TensorRT, NNIE, etc.
+    from mqbench.utils.state import enable_calibration          # turn on calibration algorithm, determine scale, zero_point, etc.
+    from mqbench.utils.state import enable_quantization         # turn on actual quantization, like FP32 -> INT8
+    from mqbench.convert_deploy import convert_deploy           # remove quant nodes for deploy
+
+    model = models.__dict__["resnet18"](pretrained=True)        # use vision pre-defined model
+    model.eval()
+
+**2**. **The next step prepares to quantize the model.**
+
+.. code-block:: python
+
+    model = prepare_by_platform(model, BackendType.Tensorrt)  #! line 1. trace model and add quant nodes for model on the TensorRT backend
+    enable_calibration(model)                                  #! line 2. turn on calibration, ready for gathering data
+
+    # calibration loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+    enable_quantization(model)                                 #! line 3. turn on actual quantization, ready for simulating Backend inference
+
+    # evaluation loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+    # define dummy data for model export.
+    input_shape={'data': [10, 3, 224, 224]}
+    convert_deploy(model, BackendType.Tensorrt, input_shape)   #! line 4. remove quant nodes, ready for deploying to real-world hardware
+
+If you want to know more about deploying to a customized backend, check :doc:`../user_guide/internal/learn_config` and :doc:`../user_guide/howtodeploy`.
+
+**Now you can use the exported files to test on real hardware using TensorRT as the backend, congratulations!**
+
+Now you can follow our advanced :doc:`user guide <../user_guide/index>` and :doc:`developer guide <../developer_guide/index>` to learn more about MQBench.
diff --git a/docs/source/get_started/setup.rst b/docs/source/get_started/setup.rst
new file mode 100644
index 0000000..fff70ad
--- /dev/null
+++ b/docs/source/get_started/setup.rst
@@ -0,0 +1,13 @@
+Installation
+============
+
+MQBench only depends on PyTorch 1.8.1. Follow `pytorch.org `_ or use the requirements file to install it:
+
+.. code-block:: shell
+    :linenos:
+
+    cd /path_of_mqbench             # change dir to MQBench
+    pip install -r requirements.txt # install MQBench dependencies
+    python setup.py install         # install MQBench
+
+You have finished installing MQBench; check :doc:`quick_start_academic` or :doc:`quick_start_deploy` to get started with MQBench.
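+
+After installation you may want a quick sanity check; this minimal sketch only assumes the public imports used throughout this guide:
+
+.. code-block:: python
+
+    # verify that MQBench and its main entry points are importable
+    import mqbench
+    from mqbench.prepare_by_platform import prepare_by_platform, BackendType
+    print([b.name for b in BackendType])  # list the available backend presets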
\ No newline at end of file
diff --git a/docs/source/get_started/support_matrix.rst b/docs/source/get_started/support_matrix.rst
new file mode 100644
index 0000000..10e3595
--- /dev/null
+++ b/docs/source/get_started/support_matrix.rst
@@ -0,0 +1,24 @@
+Support Matrix
+==============
+
+**We currently support the following hardware and algorithms; the list keeps growing.**
+
+Hardware & Backend
+^^^^^^^^^^^^^^^^^^
+
+1. :doc:`../user_guide/deploy/tensorrt`
+2. :doc:`../user_guide/deploy/snpe`
+3. NNIE
+4. Vitis
+5. ONNX_QNN
+
+Algorithm
+^^^^^^^^^
+
+1. `LSQ `_
+2. `LSQ+ `_
+3. `DSQ `_
+4. `PACT `_
+5. `APoT `_
+6. `AdaRound `_
+7. `QIL `_
\ No newline at end of file
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 5242a68..cb2b67e 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -9,11 +9,12 @@ Welcome to MQBench's documentation!
 .. toctree::
    :maxdepth: 2
    :caption: Contents:
-
-   example/index
-   algorithm/index
-   hardware/index
-   api/modules
+
+   get_started/index
+   user_guide/index
+   developer_guide/index
+   benchmark/index
+   api_reference/index
 
 
 Indices and tables
diff --git a/docs/source/user_guide/PTQ/adaround.rst b/docs/source/user_guide/PTQ/adaround.rst
new file mode 100644
index 0000000..d4d2252
--- /dev/null
+++ b/docs/source/user_guide/PTQ/adaround.rst
@@ -0,0 +1,65 @@
+AdaRound
+========
+
+`AdaRound `_ aims to find the globally optimal strategy for rounding the quantized values. Intuitively, rounding-to-nearest is optimal for each individual value, but theoretical analysis of the quantization loss shows this is not the case for the entire network or a whole layer. The second-order term in the loss difference contains cross terms of the rounding errors, illustrated for a layer of two weights:
+
+.. raw:: latex html
+
+   \[ E[ L(x,y,\mathbf{w}) - L(x,y,\mathbf{w}+\Delta \mathbf{w}) ] \approx \Delta \mathbf{w}^T g^{(\mathbf{w})} + \frac12 \Delta \mathbf{w}^T H^{(\mathbf{w})} \Delta \mathbf{w} \approx \Delta \mathbf{w}_1^2 + \Delta \mathbf{w}_2^2 + \Delta \mathbf{w}_1 \Delta \mathbf{w}_2 \]
+
+Hence, it is beneficial to learn a rounding mask for each layer. A well-designed objective function is given by the authors:
+
+.. raw:: latex html
+
+   \[ \mathop{\arg\min}_{\mathbf{V}}\ \ || Wx-\tilde{W}x ||_F^2 + \lambda f_{reg}(\mathbf{V}), \]
+   \[ \tilde{W}=s \cdot clip\left( \left\lfloor\dfrac{W}{s}\right\rfloor+h(\mathbf{V}), n, p \right) \]
+
+where :math:`h(\mathbf{V}_{i,j})=clip(\sigma(\mathbf{V}_{i,j})(\zeta-\gamma)+\gamma, 0, 1)`, and :math:`f_{reg}(\mathbf{V})=\mathop{\sum}_{i,j}{1-|2h(\mathbf{V}_{i,j})-1|^\beta}`. By annealing on :math:`\beta`, the rounding mask can adapt freely in the initial phase and converge to 0 or 1 in the later phase.
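+
+As a concrete reading of these two formulas, here is a minimal sketch of the rectified sigmoid and the regularizer (``ZETA``/``GAMMA`` are the stretch constants from the AdaRound paper; treat the exact values as assumptions):
+
+.. code-block:: python
+
+    import torch
+
+    GAMMA, ZETA = -0.1, 1.1  # stretch constants (assumed from the paper)
+
+    def h(V):
+        # rectified sigmoid: maps the continuous variable V into [0, 1]
+        return torch.clamp(torch.sigmoid(V) * (ZETA - GAMMA) + GAMMA, 0, 1)
+
+    def f_reg(V, beta):
+        # pushes h(V) towards 0 or 1 as beta is annealed down
+        return (1 - (2 * h(V) - 1).abs().pow(beta)).sum()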
+
+.. code-block:: python
+    :linenos:
+
+    import torchvision.models as models
+    from mqbench.convert_deploy import convert_deploy
+    from mqbench.prepare_by_platform import prepare_by_platform, BackendType
+    from mqbench.utils.state import enable_calibration, enable_quantization
+    from mqbench.adaround import adaround
+
+    # first, initialize the FP32 model with pretrained parameters.
+    model = models.__dict__["resnet18"](pretrained=True)
+
+    # then, we will trace the original model using torch.fx and \
+    # insert fake quantize nodes according to different hardware backends (e.g. TensorRT).
+    model = prepare_by_platform(model, BackendType.Tensorrt)
+
+    # before training, we recommend enabling observers for calibration on several batches, and then enabling quantization.
+    model.eval()
+    enable_calibration(model)
+
+    # set adaround config
+    adaround_config_dict = {
+        'adaround': True,
+        'warm_up': 0.2,
+        'weight': 0.01,
+        'max_count': 10000,
+        'b_range': [20, 2],
+        'keep_gpu': True,
+        'round_mode': 'learned_hard_sigmoid'}
+
+    # adaround loop
+    stacked_tensor = []
+    # add calibration data to stack
+    for i, batch_data in enumerate(data):
+        if i == cali_batchsize:
+            break
+        stacked_tensor.append(batch_data)
+    # start layer reconstruction
+    enable_quantization(model)
+    model = adaround(model, stacked_tensor, adaround_config_dict)
+
+    # do evaluation
+    ...
+
+    # deploy model, remove fake quantize nodes and dump quantization params like clip ranges.
+    convert_deploy(model.eval(), BackendType.Tensorrt, input_shape_dict={'data': [10, 3, 224, 224]})
\ No newline at end of file
diff --git a/docs/source/user_guide/PTQ/naive.rst b/docs/source/user_guide/PTQ/naive.rst
new file mode 100644
index 0000000..2082fcb
--- /dev/null
+++ b/docs/source/user_guide/PTQ/naive.rst
@@ -0,0 +1,33 @@
+Naive PTQ
+=========
+
+MQBench provides a simple API for naive PTQ; follow our step-by-step instructions to quantize your model. You can also see :doc:`../../get_started/quick_start_academic` for more details.
+
+.. code-block:: python
+    :linenos:
+
+    import torchvision.models as models                        # PyTorch model
+    from mqbench.prepare_by_platform import prepare_by_platform # add quant nodes for specific Backend
+    from mqbench.prepare_by_platform import BackendType         # contains various Backends, like TensorRT, NNIE, etc.
+    from mqbench.utils.state import enable_calibration          # turn on calibration algorithm, determine scale, zero_point, etc.
+    from mqbench.utils.state import enable_quantization         # turn on actual quantization, like FP32 -> INT8
+
+    model = models.__dict__["resnet18"](pretrained=True)        # use vision pre-defined model
+    model.eval()
+
+    model = prepare_by_platform(model, BackendType.Tensorrt)  #! line 1. trace model and add quant nodes for model on the TensorRT backend
+    enable_calibration(model)                                  #! line 2. turn on calibration, ready for gathering data
+
+    # calibration loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+    enable_quantization(model)                                 #! line 3. turn on actual quantization, ready for simulating Backend inference
+
+    # evaluation loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+Now you know how to conduct naive PTQ with MQBench; if you want to know more about customizing the backend, check :doc:`../internal/learn_config`.
\ No newline at end of file
diff --git a/docs/source/user_guide/QAT/naive.rst b/docs/source/user_guide/QAT/naive.rst
new file mode 100644
index 0000000..fa1762d
--- /dev/null
+++ b/docs/source/user_guide/QAT/naive.rst
@@ -0,0 +1,44 @@
+Naive QAT
+============
+
+Training only requires a few additional operations compared to ordinary fine-tuning.
+
+.. code-block:: python
+    :linenos:
+
+    import torchvision.models as models
+    from mqbench.convert_deploy import convert_deploy
+    from mqbench.prepare_by_platform import prepare_by_platform, BackendType
+    from mqbench.utils.state import enable_calibration, enable_quantization
+
+    # first, initialize the FP32 model with pretrained parameters.
+    model = models.__dict__["resnet18"](pretrained=True)
+    model.train()
+
+    # then, we will trace the original model using torch.fx and \
+    # insert fake quantize nodes according to different hardware backends (e.g. TensorRT).
+    model = prepare_by_platform(model, BackendType.Tensorrt)
+
+    # before training, we recommend enabling observers for calibration on several batches, and then enabling quantization.
+    model.eval()
+    enable_calibration(model)
+
+    # calibration loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+    model.train()
+    enable_quantization(model)
+    # training loop
+    for i, batch in enumerate(data):
+        # do forward procedures
+        ...
+
+        # do backward and optimization
+        ...
+
+    # deploy model, remove fake quantize nodes and dump quantization params like clip ranges.
+    convert_deploy(model.eval(), BackendType.Tensorrt, input_shape_dict={'data': [10, 3, 224, 224]})
+
+Now you know how to conduct naive QAT with MQBench; if you want to know more about customizing the backend, check :doc:`../internal/learn_config`.
\ No newline at end of file
diff --git a/docs/source/user_guide/algorithm/index.rst b/docs/source/user_guide/algorithm/index.rst
new file mode 100644
index 0000000..6505084
--- /dev/null
+++ b/docs/source/user_guide/algorithm/index.rst
@@ -0,0 +1,107 @@
+Quantization Algorithm
+===========================
+
+.. _LSQ: https://arxiv.org/abs/1902.08153
+.. _LSQ plus: https://arxiv.org/abs/2004.09576
+.. _DSQ: https://arxiv.org/abs/1908.05033
+.. _PACT: https://arxiv.org/abs/1805.06085
+.. _APoT: https://arxiv.org/abs/1909.13144
+.. _opensource codes: https://github.com/yhhhli/APoT_Quantization
+.. _weight standardization: https://github.com/joe-siyuan-qiao/WeightStandardization
+.. _QIL: https://arxiv.org/abs/1808.05779
+.. _AdaRound: https://arxiv.org/abs/2004.10568
+
+
+Post-training Quantization vs. Quantization-aware Training
+-----------------------------------------------------------------------
+
+1. Post Training Quantization (PTQ):
+
+   Quantize a pre-trained network with limited data and computation resources, using activation range estimation, BN statistics update and other tuning techniques.
+
+2. Quantization Aware Training (QAT):
+
+   Fine-tune a pre-trained full-precision model end-to-end; this requires all training data and substantial computation resources.
+
+QAT Algorithms
+---------------------------------
+
+**Learned Step Size Quantization**:
+
+`LSQ`_ leverages the Straight-Through Estimator (i.e. it directly passes the gradient through the round operation) to learn the quantization scale for each layer.
+Please refer to the original paper for the detailed derivation of the scale gradient.
+For initialization, we use the method proposed in the original paper: the scale is determined by :math:`s= \frac{2||\mathbf{w}||_1}{\sqrt{N_{max}}}`. For symmetric quantization, the zero point is initialized to 0 and kept fixed. For asymmetric quantization, the zero point is initialized to :math:`N_{min}` if the activation is non-negative. Inspired by `LSQ plus`_, the zero point can also be updated through backpropagation with the help of STE; therefore we make it learnable in asymmetric quantization.
+LSQ uses a gradient scale to stabilize scale learning. The gradient scale is determined by :math:`\frac{1}{\sqrt{MN_{max}}}` where :math:`M` is the number of elements in the tensor. We extend this gradient scale to per-channel weight learning, where :math:`M` is the number of weights in each filter.
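+
+The two scaling rules above can be sketched in a few lines (a minimal sketch; ``grad_scale`` is a common STE helper, not necessarily MQBench's exact implementation):
+
+.. code-block:: python
+
+    import math
+    import torch
+
+    def grad_scale(x, scale):
+        # forward: returns x unchanged; backward: multiplies the gradient by `scale`
+        return (x - x * scale).detach() + x * scale
+
+    def init_lsq_scale(w: torch.Tensor, n_max: int) -> torch.Tensor:
+        # s = 2 * ||w||_1 / sqrt(N_max); ||w||_1 read here as the mean absolute value (assumption)
+        return 2 * w.abs().mean() / math.sqrt(n_max)
+
+    # during training, the learnable scale s would be wrapped as
+    # s_scaled = grad_scale(s, 1.0 / math.sqrt(M * n_max))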
+
+**Differentiable Soft Quantization**:
+
+`DSQ`_ uses the hyperbolic tangent function to approximate the conventionally adopted STE. In our implementation, we use :math:`\alpha=0.4` (for the definition please refer to the original paper), which controls the shape and smoothness of the :math:`\mathrm{tanh}` function. For weight quantization, we use the min-max range as
+
+.. raw:: latex html
+
+   \[Clip_{min} = \mu(\mathbf{w}) - 2.6\sigma(\mathbf{w}) \]
+   \[Clip_{max} = \mu(\mathbf{w}) + 2.6\sigma(\mathbf{w}) \]
+
+where :math:`\mu(\cdot)` and :math:`\sigma(\cdot)` compute the mean and standard deviation of the tensor. Then, the scale is determined by :math:`s=\frac{\max(-Clip_{min}, Clip_{max})}{N_{max}-N_{min}}` for symmetric quantization, and :math:`\frac{Clip_{max}-Clip_{min}}{N_{max}-N_{min}}` for asymmetric quantization. The zero point is set to 0 for symmetric quantization and to :math:`N_{min}-\lfloor \frac{Clip_{min}}{s}\rceil` for asymmetric quantization. For activations, we use BatchMinMax as the clipping range, i.e. the averaged min-max range across the batch dimension. This is further updated with an exponential moving average across batches with momentum 0.9, similar to Batch Normalization.
+
+**Parameterized Clipping Activation**:
+
+`PACT`_ quantizes activations by learning the clipping threshold through STE. The activation is first clipped by a parameter :math:`\alpha`; the clipped activation is then quantized and de-quantized. Although PACT and LSQ both learn the scale, they differ in three ways. First, the clipping range in PACT is hand-initialized to 6, while LSQ initialization is based on the tensor's :math:`L1` norm. Second, PACT has no gradient within the clipping range, while LSQ can compute the gradient there. Third, PACT does not scale the gradient of :math:`\alpha`, while LSQ does.
+Note that PACT originally supports only non-negative, unsigned quantization. To extend it to our hardware settings, we clip the activation to :math:`(-\alpha, \alpha)` in the symmetric case and to :math:`(\beta, \alpha)` in the asymmetric case (where :math:`\beta` is initialized to :math:`-6`).
+For weight quantization, PACT is the same as DoReFa-Net.
+
+**DoReFa-Net**:
+
+DoReFa-Net simply clips the activation to :math:`[0, 1]` and then quantizes it. This is based on the intuition that most activations fall into this range in older network architectures, e.g. AlexNet and ResNet. In hardware settings, we modify the activation range to :math:`[-1, 1]` for both symmetric and asymmetric quantization. Weight quantization can be described as:
+
+.. raw:: latex html
+
+   \[\tilde{\mathbf{w}} = \mathrm{tanh}(\mathbf{w}) \frac{1}{\max(|\mathrm{tanh}(\mathbf{w})|)} \]
+   \[\hat{\mathbf{w}} = \mathrm{dequantize}(\mathrm{quantize(\tilde{\mathbf{w}})}) \]
+
+where the first step is a non-linear transformation and the second step is the quantization and de-quantization. The scale is simply calculated as :math:`\frac{2}{N_{max}-N_{min}}` for symmetric quantization and :math:`\frac{\max(\tilde{\mathbf{w}}) - \min(\tilde{\mathbf{w}})}{N_{max}-N_{min}}` for asymmetric quantization.
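+
+The weight transformation above is short enough to write out directly (a minimal sketch of the formula, not MQBench's exact code):
+
+.. code-block:: python
+
+    import torch
+
+    def dorefa_weight_transform(w: torch.Tensor) -> torch.Tensor:
+        # tanh squashing followed by normalization into [-1, 1]
+        wt = torch.tanh(w)
+        return wt / wt.abs().max()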
+
+
+**Additive Powers-of-Two Quantization**:
+
+`APoT`_ quantization combines multiple PoT (powers-of-two) terms to compose a set of non-uniform quantization levels. Since the quantization levels are non-uniform in most cases (except for 2-bit, where APoT degenerates to uniform quantization), we do not benchmark it on real hardware. Additionally, APoT introduces weight normalization (similar to the `weight standardization`_ technique) to smooth the learning of the clipping range in weights. However, it is unclear how to incorporate this technique with BN folding.
+Therefore, we only reproduce it in our academic setting. The implementation is based on the `opensource codes`_.
+
+
+
+**Quantization Interval Learning**:
+
+`QIL`_ consists of two units for quantization: (1) the first is called the transformer, which transforms weights or activations to :math:`[-1, 1]` (:math:`[0, 1]` for non-negative activations).
+This transformer also has two functionalities: pruning and non-linearity.
+(2) The second is called the quantizer, given by
+
+.. raw:: latex html
+
+   \[ \tilde{\mathbf{w}} = \mathrm{clip}\left((\alpha |\mathbf{w}| + \beta)^{\gamma}, 0, 1\right) * \mathrm{sign}(\mathbf{w})\]
+   \[ \hat{\mathbf{w}} = \mathrm{dequantize}(\mathrm{quantize(\tilde{\mathbf{w}})}), \]
+
+where :math:`\alpha = \frac{1}{2*D}` and :math:`\beta=-\frac{C}{2D}+\frac{1}{2}`. This transformation maps the weights from :math:`[C-D, C+D]` to :math:`[0, 1]` and from :math:`[-C-D, -C+D]` to :math:`[-1, 0]`. As a result, the weights in :math:`[-C+D, C-D]` are pruned. The non-linearity of the transformation function is introduced by :math:`\gamma`. This parameter can control the linearity and thus the quantization interval. However, we find this technique extremely unstable; in our experimental reproduction, learning :math:`\gamma` does not converge. In the original paper, the gradient scale of :math:`C` and :math:`D` is set to 0.01. We find this gradient scale also leads to frequent crashes, so we use the gradient scale introduced in LSQ, i.e. :math:`\frac{1}{\sqrt{MN_{max}}}`.
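+
+For reference, the transformer above can be sketched directly from the equations (a minimal sketch; the inner clamp avoids fractional powers of negative numbers while matching the pruning behavior):
+
+.. code-block:: python
+
+    import torch
+
+    def qil_transform(w: torch.Tensor, c: float, d: float, gamma: float = 1.0) -> torch.Tensor:
+        # maps |w| in [c - d, c + d] to [0, 1] (sign preserved); |w| < c - d is pruned to 0
+        alpha = 1.0 / (2.0 * d)
+        beta = -c / (2.0 * d) + 0.5
+        base = torch.clamp(alpha * w.abs() + beta, 0, 1)
+        return base ** gamma * torch.sign(w)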
+
+
+PTQ Algorithms
+------------------------------
+
+**AdaRound**:
+
+`AdaRound`_ aims to find the globally optimal strategy for rounding the quantized values. Intuitively, rounding-to-nearest is optimal for each individual value, but theoretical analysis of the quantization loss shows this is not the case for the entire network or a whole layer. The second-order term in the loss difference contains cross terms of the rounding errors, illustrated for a layer of two weights:
+
+.. raw:: latex html
+
+   \[ E[ L(x,y,\mathbf{w}) - L(x,y,\mathbf{w}+\Delta \mathbf{w}) ] \approx \Delta \mathbf{w}^T g^{(\mathbf{w})} + \frac12 \Delta \mathbf{w}^T H^{(\mathbf{w})} \Delta \mathbf{w} \approx \Delta \mathbf{w}_1^2 + \Delta \mathbf{w}_2^2 + \Delta \mathbf{w}_1 \Delta \mathbf{w}_2 \]
+
+Hence, it is beneficial to learn a rounding mask for each layer. A well-designed objective function is given by the authors:
+
+.. raw:: latex html
+
+   \[ \mathop{\arg\min}_{\mathbf{V}}\ \ || Wx-\tilde{W}x ||_F^2 + \lambda f_{reg}(\mathbf{V}), \]
+   \[ \tilde{W}=s \cdot clip\left( \left\lfloor\dfrac{W}{s}\right\rfloor+h(\mathbf{V}), n, p \right) \]
+
+where :math:`h(\mathbf{V}_{i,j})=clip(\sigma(\mathbf{V}_{i,j})(\zeta-\gamma)+\gamma, 0, 1)`, and :math:`f_{reg}(\mathbf{V})=\mathop{\sum}_{i,j}{1-|2h(\mathbf{V}_{i,j})-1|^\beta}`. By annealing on :math:`\beta`, the rounding mask can adapt freely in the initial phase and converge to 0 or 1 in the later phase.
+
diff --git a/docs/source/user_guide/deploy/snpe.rst b/docs/source/user_guide/deploy/snpe.rst
new file mode 100644
index 0000000..0266cf4
--- /dev/null
+++ b/docs/source/user_guide/deploy/snpe.rst
@@ -0,0 +1,58 @@
+SNPE
+====
+
+Introduction
+^^^^^^^^^^^^
+
+`Snapdragon Neural Processing Engine (SNPE) `_ is a Qualcomm Snapdragon software-accelerated runtime for the execution of deep neural networks.
+
+.. _SNPE Quantization Scheme:
+
+**Quantization Scheme**
+
+8/16-bit per-layer asymmetric linear quantization.
+
+.. math::
+
+    \begin{equation}
+    q = \mathtt{clamp}\left(\left\lfloor R * \dfrac{x - cmin}{cmax - cmin} \right\rceil, lb, ub\right)
+    \end{equation}
+
+where :math:`R` is the integer range after quantization, :math:`cmax` and :math:`cmin` are the calculated range of the floating-point values, and :math:`lb` and :math:`ub` are the bounds of the integer range.
+Taking 8-bit as an example, R=255 and [lb, ub]=[0,255].
+
+
+Deploy on SNPE
+^^^^^^^^^^^^^^
+
+**Requirements**:
+
+- Install the SNPE SDK from `Qualcomm `_ (Ubuntu 18.04 is suggested)
+
+**Deployment**:
+
+- Convert the PyTorch checkpoint to `snpe_deploy.onnx` and dump clip ranges to `snpe_clip_ranges.json`:
+
+  .. code-block:: python
+      :linenos:
+
+      from mqbench.convert_deploy import convert_deploy
+      from mqbench.prepare_by_platform import BackendType
+
+      input_dict = {'x': [1, 3, 224, 224]}
+      convert_deploy(solver.model.module, BackendType.SNPE, input_dict)
+
+- Convert the `.onnx` file to `.dlc` format (supported by SNPE):
+
+  .. code-block:: shell
+      :linenos:
+
+      # Note that the `.json` file contains activation ranges for quantization; it is required here even though the model has not been quantized yet.
+      snpe-onnx-to-dlc --input_network ./snpe_deploy.onnx --output_path ./snpe_deploy.dlc --quantization_overrides ./snpe_clip_ranges.json
+
+- Quantize the model with the parameters overridden:
+
+  .. code-block:: shell
+      :linenos:
+
+      # `data.txt` records paths to image data for calibration (not important here, since we override the parameters); each file is loaded by `numpy.fromfile(dtype=np.float32)` and has shape `(224, 224, 3)`. This file is required for the test.
+      # Now we get the final model `snpe_deploy_quantized.dlc`
+      snpe-dlc-quantize --input_dlc ./snpe_deploy.dlc --input_list ./data.txt --override_params --bias_bitwidth 32
\ No newline at end of file
diff --git a/docs/source/user_guide/deploy/tensorrt.rst b/docs/source/user_guide/deploy/tensorrt.rst
new file mode 100644
index 0000000..5af9ab1
--- /dev/null
+++ b/docs/source/user_guide/deploy/tensorrt.rst
@@ -0,0 +1,56 @@
+TensorRT
+========
+
+Introduction
+^^^^^^^^^^^^
+
+`NVIDIA TensorRT `_ is a platform for high-performance deep learning inference on GPU devices.
+
+.. _TensorRT Quantization Scheme:
+
+**Quantization Scheme**
+
+8-bit per-channel symmetric linear quantization.
+
+.. math::
+
+    \begin{equation}
+    q = \mathtt{clamp}(\lfloor x * s \rceil, lb, ub)
+    \end{equation}
+
+where :math:`s` is the scaling factor that quantizes a number from the floating range to the integer range, and :math:`lb` and :math:`ub` are the bounds of the integer range.
+For weights, [lb, ub] = [-127, 127]. For activations, [lb, ub] = [-128, 127].
+
+For weights, each filter needs an independent scale :math:`s`.
+
+Deploy on TensorRT
+^^^^^^^^^^^^^^^^^^
+
+**Requirements**:
+
+- Install TensorRT=7.2.1.6 from `NVIDIA `_
+
+**Deployment**:
+
+We provide an example of deploying the quantized model to TensorRT.
+
+- First, export the quantized model to ONNX [tensorrt_deploy_model.onnx] and dump the clip ranges [tensorrt_clip_ranges.json] for activations.
+
+  .. code-block:: shell
+      :linenos:
+
+      python main.py -a [model_name] --resume [model_save_path]
+
+- Second, build the TensorRT INT8 engine and evaluate; please make sure [dataset_path] contains the subfolder [val].
+
+  .. code-block:: shell
+      :linenos:
+
+      python onnx2trt.py --onnx [tensorrt_deploy_model.onnx] --trt [model_name.trt] --clip [tensorrt_clip_ranges.json] --data [dataset_path] --evaluate
+
+- If you do not pass in external clip ranges [tensorrt_clip_ranges.json], TensorRT will do calibration using its default algorithm IInt8EntropyCalibrator2 with 100 images.
+  So, please make sure [dataset_path] contains the subfolder [cali].
+
+  .. code-block:: shell
+      :linenos:
+
+      python onnx2trt.py --onnx [tensorrt_deploy_model.onnx] --trt [model_name.trt] --data [dataset_path] --evaluate
diff --git a/docs/source/user_guide/hardware/index.rst b/docs/source/user_guide/hardware/index.rst
new file mode 100644
index 0000000..0c606e8
--- /dev/null
+++ b/docs/source/user_guide/hardware/index.rst
@@ -0,0 +1,9 @@
+Quantization Hardware
+========================================
+
+.. toctree::
+   :maxdepth: 1
+
+   nnie
+   tensorrt
+   snpe
diff --git a/docs/source/user_guide/hardware/nnie.rst b/docs/source/user_guide/hardware/nnie.rst
new file mode 100644
index 0000000..c70a131
--- /dev/null
+++ b/docs/source/user_guide/hardware/nnie.rst
@@ -0,0 +1,88 @@
+NNIE
+====
+NNIE is the Neural Network Inference Engine of HiSilicon. It supports INT8/INT16 quantization.
+
+.. _NNIE Quantization Scheme:
+
+Quantization Scheme
+---------------------
+8/16-bit per-layer logarithmic quantization.
+
+The specific quantization formulation is:
+
+.. math::
+
+    \begin{equation}
+    \begin{aligned}
+    &z = \lfloor 16 * \log_2(c) \rceil - 127 \\
+    &\mathtt{fakequant}(x) = \begin{cases}
+    - 2 ^ {\dfrac{\mathtt{clamp}(\lfloor 16 * \log_2(-x) \rceil - z, 1, 127) + z}{16}}, & x \lt - 2 ^ {\dfrac{z + 1}{16} - 1} \\
+    2 ^ {\dfrac{\mathtt{clamp}(\lfloor 16 * \log_2(x) \rceil - z, 0, 127) + z}{16}}, & x \ge 2 ^ {\dfrac{z}{16} - 1} \\
+    0, & \text{otherwise}
+    \end{cases}
+    \end{aligned}
+    \end{equation}
+
+where :math:`c` is the clipping range and :math:`2 ^ {\dfrac{z}{16}}` is the smallest positive value representable after quantization.
+
+NNIE represents the integer number in *True Form* (sign-magnitude) format:
+the highest bit holds the sign and the rest represent the absolute value of the number.
+
+.. list-table::
+   :header-rows: 1
+   :align: center
+
+   * - Floating Number
+     - Integer Number
+     - Hexadecimal
+     - Dequantized Floating Number
+   * - :math:`\bigg(- \infty, - 2 ^ {\dfrac{z + 126.5}{16}}\bigg]`
+     - -127
+     - 0xFF
+     - :math:`- 2 ^ {\dfrac{z+127}{16}}`
+   * - ...
+     - ...
+     - ...
+     - ...
+   * - :math:`\bigg(- 2 ^ {\dfrac{z + 2.5}{16}}, - 2 ^ {\dfrac{z + 1.5}{16}}\bigg]`
+     - -2
+     - 0x82
+     - :math:`- 2 ^ {\dfrac{z+2}{16}}`
+   * - :math:`\bigg(- 2 ^ {\dfrac{z + 1.5}{16}}, - 2 ^ {\dfrac{z + 1}{16} - 1}\bigg)`
+     - -1
+     - 0x81
+     - :math:`- 2 ^ {\dfrac{z+1}{16}}`
+   * - :math:`\bigg[- 2 ^ {\dfrac{z + 1}{16} - 1}, 2 ^ {\dfrac{z}{16} - 1}\bigg)`
+     - -0
+     - 0x80
+     - 0
+   * - :math:`\bigg[2 ^ {\dfrac{z}{16} - 1}, 2 ^ {\dfrac{z + 0.5}{16}}\bigg)`
+     - 0
+     - 0x00
+     - :math:`2 ^ {\dfrac{z}{16}}`
+   * - :math:`\bigg[2 ^ {\dfrac{z + 0.5}{16}}, 2 ^ {\dfrac{z + 1.5}{16}}\bigg)`
+     - 1
+     - 0x01
+     - :math:`2 ^ {\dfrac{z+1}{16}}`
+   * - ...
+     - ...
+     - ...
+     - ...
+   * - :math:`\bigg[2 ^ {\dfrac{z + 126.5}{16}}, + \infty\bigg)`
+     - 127
+     - 0x7F
+     - :math:`2 ^ {\dfrac{z+127}{16}}`
+
+NNIE performs per-layer quantization: the inputs of the same layer share the same :math:`z_a` and the weights of the same layer share the same :math:`z_w`.
+
+In fact, when building an engine with NNIE's official tool, it requires the clipping value :math:`c` rather than :math:`z`. :math:`c` needs to be a number in :download:`gfpq_param_table_8bit.txt`, which ensures that :math:`16 * \log_2{c}` is an integer.
+
+.. attention::
+    Pooling: ceil_mode = True
+
+    Avoid using depthwise convolution.
+
+    Only 2x nearest-neighbor upsampling is supported.
+
+    For detection tasks, a RetinaNet-style structure is recommended.
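+
+Putting the scheme above together, here is a minimal NumPy sketch of ``fakequant`` (an illustration of the formula, not HiSilicon's implementation):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def nnie_fakequant(x: np.ndarray, c: float) -> np.ndarray:
+        # z from the clipping value c
+        z = np.round(16 * np.log2(c)) - 127
+        pos = x >= 2 ** (z / 16 - 1)
+        neg = x < -(2 ** ((z + 1) / 16 - 1))
+        out = np.zeros_like(x)
+        # positive branch: exponent code clamped to [0, 127]
+        e = np.clip(np.round(16 * np.log2(np.where(pos, x, 1))) - z, 0, 127)
+        out[pos] = (2 ** ((e + z) / 16))[pos]
+        # negative branch: exponent code clamped to [1, 127]
+        e = np.clip(np.round(16 * np.log2(np.where(neg, -x, 1))) - z, 1, 127)
+        out[neg] = -(2 ** ((e + z) / 16))[neg]
+        return out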
diff --git a/docs/source/user_guide/hardware/snpe.rst b/docs/source/user_guide/hardware/snpe.rst
new file mode 100644
index 0000000..0d35459
--- /dev/null
+++ b/docs/source/user_guide/hardware/snpe.rst
@@ -0,0 +1,29 @@
+SNPE
+=========
+
+`Snapdragon Neural Processing Engine (SNPE) `_ is a Qualcomm Snapdragon software-accelerated runtime for the execution of deep neural networks.
+
+.. _SNPE Quantization Scheme:
+
+Quantization Scheme
+--------------------
+8/16-bit per-layer asymmetric linear quantization.
+
+.. math::
+
+    \begin{equation}
+    q = \mathtt{clamp}\left(\left\lfloor R * \dfrac{x - cmin}{cmax - cmin} \right\rceil, lb, ub\right)
+    \end{equation}
+
+where :math:`R` is the integer range after quantization, :math:`cmax` and :math:`cmin` are the calculated range of the floating-point values, and :math:`lb` and :math:`ub` are the bounds of the integer range.
+Taking 8-bit as an example, R=255 and [lb, ub]=[0,255].
+
+
+In fact, when building with the official SNPE tools, the model is first converted into a full-precision *.dlc* file, and then optionally changed into a quantized version.
+
+.. attention::
+    Users can provide a .json file to override the parameters.
+
+    The values of *scale* and *offset* are not required, but can be overridden.
+
+    SNPE adjusts the values of *cmin* and *cmax* to ensure that zero is representable.
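+
+A minimal sketch of this asymmetric scheme (an illustration of the formula above, not Qualcomm's implementation):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def snpe_quant(x: np.ndarray, cmin: float, cmax: float, bits: int = 8):
+        R = 2 ** bits - 1                      # e.g. 255 for 8-bit
+        q = np.clip(np.round(R * (x - cmin) / (cmax - cmin)), 0, R)
+        dq = q / R * (cmax - cmin) + cmin      # de-quantized value seen by the network
+        return q, dq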
diff --git a/docs/source/user_guide/hardware/tensorrt.rst b/docs/source/user_guide/hardware/tensorrt.rst
new file mode 100644
index 0000000..34a75de
--- /dev/null
+++ b/docs/source/user_guide/hardware/tensorrt.rst
@@ -0,0 +1,23 @@
+TensorRT
+=========
+
+`NVIDIA TensorRT `_ is a platform for high-performance deep learning inference on GPU devices.
+
+.. _TensorRT Quantization Scheme:
+
+Quantization Scheme
+--------------------
+8-bit per-channel symmetric linear quantization.
+
+.. math::
+
+    \begin{equation}
+    q = \mathtt{clamp}(\lfloor x * s \rceil, lb, ub)
+    \end{equation}
+
+where :math:`s` is the scaling factor that quantizes a number from the floating range to the integer range, and :math:`lb` and :math:`ub` are the bounds of the integer range.
+For weights, [lb, ub] = [-127, 127]. For activations, [lb, ub] = [-128, 127].
+
+For weights, each filter needs an independent scale :math:`s`.
+
+In fact, when building the TensorRT engine, the official tool requires the clipping value as the quantization parameter; with the convention above it can be calculated as :math:`c = 127 / s`.
diff --git a/docs/source/user_guide/howtodeploy.rst b/docs/source/user_guide/howtodeploy.rst
new file mode 100644
index 0000000..eb1daf5
--- /dev/null
+++ b/docs/source/user_guide/howtodeploy.rst
@@ -0,0 +1,10 @@
+How to deploy the model
+=======================
+
+.. toctree::
+   :titlesonly:
+
+   TensorRT
+   SNPE
+
+
diff --git a/docs/source/user_guide/howtoptq.rst b/docs/source/user_guide/howtoptq.rst
new file mode 100644
index 0000000..b625ded
--- /dev/null
+++ b/docs/source/user_guide/howtoptq.rst
@@ -0,0 +1,8 @@
+How to conduct PTQ
+===============================
+
+.. toctree::
+   :titlesonly:
+
+   Naive PTQ
+   AdaRound
diff --git a/docs/source/user_guide/howtoqat.rst b/docs/source/user_guide/howtoqat.rst
new file mode 100644
index 0000000..4f4f1cb
--- /dev/null
+++ b/docs/source/user_guide/howtoqat.rst
@@ -0,0 +1,7 @@
+How to conduct QAT
+===============================
+
+.. toctree::
+   :titlesonly:
+
+   Naive QAT
diff --git a/docs/source/user_guide/index.rst b/docs/source/user_guide/index.rst
new file mode 100644
index 0000000..e0a171c
--- /dev/null
+++ b/docs/source/user_guide/index.rst
@@ -0,0 +1,11 @@
+User Guide
+==========
+
+.. toctree::
+   :maxdepth: 4
+   :titlesonly:
+
+   Learn MQBench configuration
+   howtoptq
+   howtoqat
+   howtodeploy
diff --git a/docs/source/user_guide/internal/learn_config.rst b/docs/source/user_guide/internal/learn_config.rst
new file mode 100644
index 0000000..71ad697
--- /dev/null
+++ b/docs/source/user_guide/internal/learn_config.rst
@@ -0,0 +1,66 @@
+Learn MQBench configuration
+===========================
+
+MQBench provides a primary API, **prepare_by_platform**, for users to quantize their models.
+MQBench contains many backend presets for **hardware alignment**, but you may want to customize your own backend.
+This guide to the MQBench configuration will be helpful.
+
+**1.** The API **prepare_by_platform** accepts an extra parameter; you can provide it in the following format.
+
+.. code-block:: python
+
+    extra_config = {
+        'w_observer': MSEObserver,                              # custom weight observer
+        'a_observer': MSEObserver,                              # custom activation observer
+        'w_fakequantize': FixedFakeQuantize,                    # custom weight fake quantize function
+        'a_fakequantize': FixedFakeQuantize,                    # custom activation fake quantize function
+        'w_qscheme': {
+            'bit': 8,                                           # custom bitwidth for weight,
+            'symmetry': False,                                  # custom whether quant is symmetric for weight,
+            'per_channel': True,                                # custom whether quant is per-channel or per-tensor for weight,
+            'pot_scale': False,                                 # custom whether scale is power of two for weight.
+        },
+        'a_qscheme': {
+            'bit': 8,                                           # custom bitwidth for activation,
+            'symmetry': False,                                  # custom whether quant is symmetric for activation,
+            'per_channel': True,                                # custom whether quant is per-channel or per-tensor for activation,
+            'pot_scale': False,                                 # custom whether scale is power of two for activation.
+        }
+    }
+
+
+**2.** **Customize just by:**
+
+.. code-block:: python
+
+    prepared = prepare_by_platform(model, backend, extra_config)
+
+**3.** **MQBench currently supports these Observers and Quantizers**
+
+Observer
+^^^^^^^^
+
+.. code-block:: markdown
+
+    1. MinMaxObserver
+    2. EMAMinMaxObserver      # More general choice
+    3. MinMaxFloorObserver    # For Vitis HW
+    4. EMAMinMaxFloorObserver # For Vitis HW
+    5. EMAQuantileObserver    # Quantile observer
+    6. ClipStdObserver        # Usually used for DSQ
+    7. LSQObserver            # Usually used for LSQ
+    8. MSEObserver
+    9. EMAMSEObserver
+
+Quantizer
+^^^^^^^^^
+.. code-block:: markdown
+
+    1. FixedFakeQuantize      # Unlearnable scale/zeropoint
+    2. LearnableFakeQuantize  # Learnable scale/zeropoint
+    3. NNIEFakeQuantize       # Quantize function for NNIE
+    4. DoReFaFakeQuantize     # DoReFa
+    5. DSQFakeQuantize        # DSQ
+    6. PACTFakeQuantize       # PACT
+    7. TqtFakeQuantize        # TQT
+    8. AdaRoundFakeQuantize
\ No newline at end of file
diff --git a/mqbench/adaround.py b/mqbench/adaround.py
index 88cff0c..f2103a7 100644
--- a/mqbench/adaround.py
+++ b/mqbench/adaround.py
@@ -70,8 +70,8 @@ def __call__(self, t):
             return self.end_b
         else:
             rel_t = (t - self.start_decay) / (self.t_max - self.start_decay)
-            return self.end_b + 0.5 * (self.start_b -
-                                       self.end_b) * (1 + np.cos(rel_t * np.pi))
+            # return self.end_b + (self.start_b - self.end_b) * max(0.0, (1 - rel_t))
+            return self.end_b + 0.5 * (self.start_b - self.end_b) * (1 + np.cos(rel_t * np.pi))
 
 
 class LossFunction:
@@ -151,7 +151,6 @@ def layer_reconstruction(layer, cached_inps, cached_oups, config):
         w_opt.zero_grad()
         out_quant = layer(*cur_inp)
-
         err = loss_func(out_quant, cur_out)
         err /= world_size
         err.backward()
@@ -168,24 +167,28 @@ def adaround(model, cali_data, config):
-    # TODO: assert model is on cuda and todo: cali_data might be on cpu, put it on corresponding device
+    # assert model is on cuda
+    if not config.keep_gpu:
+        cali_data = [inp.cpu() for inp in cali_data]
     '''set state first'''
+
+    fp32_model = model
+    fp32_model.eval()
     quant_model = deepcopy_graphmodule(model)
     quant_model.eval()
-    model.eval()
-    disable_all(model)
+    disable_all(fp32_model)
     enable_quantization(quant_model)
     torch.cuda.empty_cache()
-    nodes = list(model.graph.nodes)
-    modules = dict(model.named_modules())
+    nodes = list(quant_model.graph.nodes)
+    fp32_modules = dict(fp32_model.named_modules())
     quant_modules = dict(quant_model.named_modules())
     for node in nodes:
-        if node.op == "call_module" and isinstance(modules[node.target], _ADAROUND_SUPPORT_TYPE):
+        if node.op == "call_module" and isinstance(fp32_modules[node.target], _ADAROUND_SUPPORT_TYPE):
            '''if you want to do layer reconstruction, please do layer_reconstruction'''
            logger.info('prepare layer reconstruction for {}'.format(node.target))
-            module = modules[node.target]
+            fp32_module = fp32_modules[node.target]
            quant_module = quant_modules[node.target]
-            cached_oups = save_inp_oup_data(model, module, cali_data, store_inp=False, store_oup=True,
+            cached_oups = save_inp_oup_data(fp32_model, fp32_module, cali_data, store_inp=False, store_oup=True,
                                            keep_gpu=config.keep_gpu)
            cached_inps = save_inp_oup_data(quant_model, quant_module, cali_data, store_inp=True, store_oup=False,
                                            keep_gpu=config.keep_gpu)
diff --git a/mqbench/convert_deploy.py b/mqbench/convert_deploy.py
index 6bcb7bf..109f8f3 100644
--- a/mqbench/convert_deploy.py
+++ b/mqbench/convert_deploy.py
@@ -53,12 +53,23 @@ def convert_onnx(model: GraphModule, input_shape_dict, dummy_input, onnx_model_p
     input_names = list(dummy_input.keys())
     dummy_input = tuple(dummy_input.values())
     with torch.no_grad():
-        torch.onnx.export(model, dummy_input, onnx_model_path,
-                          input_names=input_names,
-                          opset_version=11,
-                          do_constant_folding=True,
-                          custom_opsets={'' : 11},
-                          enable_onnx_checker=False)
+        try:
+            from torch.onnx.utils import ONNXCheckerError
+            try:
+                torch.onnx.export(model, dummy_input, onnx_model_path,
+                                  input_names=input_names,
+                                  opset_version=11,
+                                  do_constant_folding=True,
+                                  custom_opsets={'' : 11})
+            except ONNXCheckerError:
+                pass
+        except ImportError:
+            torch.onnx.export(model, dummy_input, onnx_model_path,
+                              input_names=input_names,
+                              opset_version=11,
+                              do_constant_folding=True,
+                              custom_opsets={'' : 11},
+                              enable_onnx_checker=False)
 
 
 @register_deploy_function(BackendType.NNIE)
@@ -116,7 +127,7 @@ def convert_deploy(model: GraphModule, backend_type: BackendType,
         output_path (str, optional): path to save convert results. Defaults to './'.
         model_name (str, optional): name of converted onnx model. Defaults to 'mqbench_qmodel'.
 
-    >>> note on input_shape_dict:
+    >>> note on input_shape_dict:
         example: {'input_0': [1, 3, 224, 224]
                   'input_1': [1, 3, 112, 112]
                  }
diff --git a/mqbench/custom_quantizer.py b/mqbench/custom_quantizer.py
index 32d7b69..a1130d1 100644
--- a/mqbench/custom_quantizer.py
+++ b/mqbench/custom_quantizer.py
@@ -35,12 +35,14 @@
 import mqbench.nn as qnn
 import mqbench.nn.intrinsic as qnni
 import mqbench.nn.intrinsic.qat as qnniqat
+from mqbench.utils import is_symmetric_quant
 from mqbench.utils.logger import logger
 from mqbench.utils.registry import register_model_quantizer
 from mqbench.prepare_by_platform import BackendType
 from mqbench.fake_quantize.tqt import TqtFakeQuantize
 
 
+@register_model_quantizer(BackendType.Tensorrt)
 @register_model_quantizer(BackendType.NNIE)
 class ModelQuantizer(object):
     """General model quantizer class.
@@ -224,6 +226,7 @@ def _find_act_quants(self, model: GraphModule) -> (set, set):
                     ((node.op == 'call_function' or node.op == 'call_method') and
                      node.target in self.exclude_function_type) or \
                     node.name in self.exclude_node_name:
+                logger.info("Exclude skip: {}".format(node.name))
                 continue
             if (node.op == "call_module" and isinstance(modules[node.target], self.module_type_to_quant_input)) or \
                     ((node.op == 'call_function' or node.op == 'call_method') and
@@ -286,14 +289,9 @@ def prepare(self, model: GraphModule, qconfig):
     def _weight_quant(self, model: GraphModule, qconfig):
         logger.info("Replace module to qat module.")
         wqconfig_8bit = copy.deepcopy(qconfig)
-
-        wq_symmetry = True if qconfig.weight.p.keywords['qscheme'] == torch.per_channel_symmetric or qconfig.weight.p.keywords[
-            'qscheme'] == torch.per_tensor_symmetric else False
+        wq_symmetry = True if is_symmetric_quant(qconfig.weight.p.keywords['qscheme']) else False
         wqconfig_8bit.weight.p.keywords['quant_min'] = -2 ** (8 - 1) if wq_symmetry else 0
         wqconfig_8bit.weight.p.keywords['quant_max'] = 2 ** (8 - 1) - 1 if wq_symmetry else 2 ** 8 - 1
-
-        # wqconfig_8bit.weight.p.keywords['quant_min'] = -128
-        # wqconfig_8bit.weight.p.keywords['quant_max'] = 127
         for name, module in model.named_modules():
             if name in self.io_module.keys():
                 logger.info("Set layer {} to 8 bit.".format(name))
@@ -348,10 +346,7 @@ def _insert_fake_quantize_for_act_quant(self, model: GraphModule, qconfig):
         node_to_quantize_output = OrderedDict.fromkeys(node_to_quantize_output).keys()
 
         aqconfig_8bit = copy.deepcopy(qconfig.activation)
-
-        aq_symmetry = True if qconfig.activation.p.keywords['qscheme'] == torch.per_channel_symmetric or \
-            qconfig.activation.p.keywords['qscheme'] == torch.per_tensor_symmetric else False
-
+        aq_symmetry = True if is_symmetric_quant(qconfig.activation.p.keywords['qscheme']) else False
         aqconfig_8bit.p.keywords['quant_min'] = -2 ** (8 - 1) if aq_symmetry else 0
         aqconfig_8bit.p.keywords['quant_max'] = 2 ** (8 - 1) - 1 if aq_symmetry else 2 ** 8 - 1
         for node in node_to_quantize_output:
@@ -373,7 +368,6 @@ def _insert_fake_quantize_for_act_quant(self, model: GraphModule, qconfig):
         return model
 
 
-@register_model_quantizer(BackendType.Tensorrt)
 class TRTModelQuantizer(ModelQuantizer):
     """The different points of TRT quantizer are how to deal with add op and the last layer.
@@ -647,6 +641,7 @@ def module_type_to_quant_input(self) -> tuple:
             # Conv
             torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d,
             torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d,
+            torch.nn.qat.Conv2d,
             # Linear
             torch.nn.qat.modules.linear.Linear,
             qnn.intrinsic.qat.LinearBn1d,
diff --git a/mqbench/deploy/deploy_linear.py b/mqbench/deploy/deploy_linear.py
index 0a9ba52..feb7845 100644
--- a/mqbench/deploy/deploy_linear.py
+++ b/mqbench/deploy/deploy_linear.py
@@ -98,7 +98,7 @@ def parse_qparams(self, node, name2data):
             logger.info(f'qmin and qmax are not found for <{node.name}>!')
         return tensor_name, scale, zero_point, qmin, qmax
 
-    def clip_weight(self, node, name2data, named_initializer):
+    def clip_weight(self, node, name2data, inp2node, named_initializer):
         tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
         data = name2data[tensor_name]
         clip_range_min = (qmin - zero_point) * scale
@@ -106,7 +106,8 @@ def clip_weight(self, node, name2data, named_initializer):
         if scale.shape[0] > 1:
             new_data = []
             transposed = False
-            if data.shape[0] != scale.shape[0]:
+            next_node = inp2node[node.output[0]]
+            if len(next_node) == 1 and next_node[0][0].op_type == 'ConvTranspose':
                 transposed = True
                 data = data.transpose(1, 0, 2, 3)
             for c in range(data.shape[0]):
@@ -161,7 +162,7 @@ def remove_fakequantize_and_collect_params(self, onnx_path, model_name, backend)
                     # fake quantize for weights, suppose per-channel quantize only for weight
                     redundant_nodes = self.deal_with_weight_fakequant(node, out2node, inp2node, named_initializer)
                     nodes_to_be_removed.extend(redundant_nodes)
-                    self.clip_weight(node, name2data, named_initializer)
+                    self.clip_weight(node, name2data, inp2node, named_initializer)
                     if backend == 'ppl':
                         tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
                         clip_ranges[tensor_name] = {'step': [float(x) for x in scale],
@@ -177,16 +178,16 @@ def remove_fakequantize_and_collect_params(self, onnx_path, model_name, backend)
 
             elif node.op_type in PERTENSOR_FAKEQUANTIZER:
-                if node.output[0] in [x.name for x in graph.output]:
+                if node.output[0] not in inp2node:
+                    assert node.output[0] in [l.name for l in graph.output]
                     inp2node[node.output[0]] = []
-
                 next_nodes = inp2node[node.output[0]]
                 if len(next_nodes) == 1 and next_nodes[0][1] == 1 and next_nodes[0][0].op_type in ['Gemm', 'Conv']:
                     # fake quantize for weights
                     redundant_nodes = self.deal_with_weight_fakequant(node, out2node, inp2node, named_initializer)
                     tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
                     nodes_to_be_removed.extend(redundant_nodes)
-                    self.clip_weight(node, name2data, named_initializer)
+                    self.clip_weight(node, name2data, inp2node, named_initializer)
                 else:
                     # fake quantize for activations
                     self.deal_with_activation_fakequant(node, inp2node)
diff --git a/mqbench/fake_quantize/adaround_quantizer.py b/mqbench/fake_quantize/adaround_quantizer.py
index bcee07b..c5fbae3 100644
--- a/mqbench/fake_quantize/adaround_quantizer.py
+++ b/mqbench/fake_quantize/adaround_quantizer.py
@@ -100,9 +100,6 @@ def forward(self, X):
                 self.zero_point.resize_(_zero_point.shape)
             self.scale.copy_(_scale)
             self.zero_point.copy_(_zero_point)
-            '''we could only do this once for weight because we usually adopt MSEObserver
-            in fact the weight only need once '''
-            self.observer_enabled[0] = 0
 
         if self.fake_quant_enabled[0] == 1:
             if not self.adaround:
diff --git a/mqbench/fake_quantize/quantize_base.py b/mqbench/fake_quantize/quantize_base.py
index 0338178..34b0bc1 100644
+++ b/mqbench/fake_quantize/quantize_base.py
@@ -45,4 +45,4 @@ def extra_repr(self):
                'quant_min={}, quant_max={}, dtype={}, qscheme={}, ch_axis={}, '.format(
                    self.fake_quant_enabled, self.observer_enabled,
                    self.quant_min, self.quant_max,
-                   self.dtype, self.qscheme, self.ch_axis)
\ No newline at end of file
+                   self.dtype, self.qscheme, self.ch_axis)
diff --git a/mqbench/fuser_method_mappings.py b/mqbench/fuser_method_mappings.py
index f8f3dc4..5738944 100644
--- a/mqbench/fuser_method_mappings.py
+++ b/mqbench/fuser_method_mappings.py
@@ -80,6 +80,7 @@ def fuse_deconv_bn_relu(deconv, bn, relu):
         ConvBNReLUFusion,
     },
     "additional_qat_module_mappings": {
+        nn.ConvTranspose2d: qnn.qat.ConvTranspose2d,
         qnni.LinearBn1d: qnniqat.LinearBn1d,
         qnni.ConvTransposeBn2d: qnniqat.ConvTransposeBn2d,
         qnni.ConvTransposeReLU2d: qnniqat.ConvTransposeReLU2d,
@@ -113,4 +114,4 @@ def _sort_fusion_patterns(pats):
 # proir than class ModuleReLUFusion.
 _sort_fusion_patterns(DEFAULT_FUSION_PATTERNS)
 DEFAULT_QAT_MODULE_MAPPINGS.update(
-    fuse_custom_config_dict['additional_qat_module_mappings'])
\ No newline at end of file
+    fuse_custom_config_dict['additional_qat_module_mappings'])
diff --git a/mqbench/nn/intrinsic/qat/modules/deconv_fused.py b/mqbench/nn/intrinsic/qat/modules/deconv_fused.py
index efe7b81..fe83419 100644
--- a/mqbench/nn/intrinsic/qat/modules/deconv_fused.py
+++ b/mqbench/nn/intrinsic/qat/modules/deconv_fused.py
@@ -25,7 +25,7 @@
 
 
 class _ConvTransposeBnNd(nn.modules.conv._ConvTransposeNd, _FusedModule):
-    _version = 1
+    _version = 2
     _FLOAT_MODULE = MOD
 
     def __init__(
diff --git a/mqbench/nn/intrinsic/qat/modules/linear_fused.py b/mqbench/nn/intrinsic/qat/modules/linear_fused.py
index 8c22eb1..98dfb4e 100644
--- a/mqbench/nn/intrinsic/qat/modules/linear_fused.py
+++ b/mqbench/nn/intrinsic/qat/modules/linear_fused.py
@@ -12,6 +12,7 @@
 
 
 class LinearBn1d(Linear, _FusedModule):
+    _version = 2
     _FLOAT_MODULE = LinearBn1d
 
     def __init__(self,
diff --git a/mqbench/observer.py b/mqbench/observer.py
index f62b4d7..1506c98 100644
--- a/mqbench/observer.py
+++ b/mqbench/observer.py
@@ -1,5 +1,7 @@
 import math
+from functools import partial
 from typing import Tuple
+
 import torch
 from torch.quantization.observer import _ObserverBase
 
@@ -24,13 +26,37 @@ class ObserverBase(_ObserverBase):
     max_val: torch.Tensor
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
-                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False):
+                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
+                 factory_kwargs=None):
         super(ObserverBase, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max)
         self.ch_axis = ch_axis
         self.pot_scale = pot_scale
         self.register_buffer("min_val", torch.tensor(float("inf")))
         self.register_buffer("max_val", torch.tensor(float("-inf")))
+        class PerChannelLoadHook:
+            def __init__(self, module):
+                self.hook = module._register_load_state_dict_pre_hook(partial(self.hook_fn, module=module))
+
+            def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
+                        module):
+                if module.ch_axis == -1:
+                    # no per-channel parameters
+                    return
+                for module_key, param in module._buffers.items():
+                    if module_key not in ['min_val', 'max_val']:
+                        continue
+                    candidate = prefix + module_key
+                    if candidate in state_dict:
+                        input_param = state_dict[candidate]
+                        if param.shape != input_param.shape:
+                            param.data = torch.ones_like(input_param, dtype=param.dtype, device=param.device)
+
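+            # Removing the stored handle detaches this pre-hook from the module.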
+            def close(self):
+                self.hook.remove()
+
+        self.load_state_dict_hook = PerChannelLoadHook(self)
+
     @torch.jit.export
     def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]:
         r"""Calculates the quantization parameters."""
@@ -96,8 +122,10 @@ class MinMaxObserver(ObserverBase):
     '''
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
-                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False):
-        super(MinMaxObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
+                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
+                 factory_kwargs=None):
+        super(MinMaxObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                             ch_axis, pot_scale, factory_kwargs)
 
     def forward(self, x_orig):
         r"""Records the running minimum and maximum of ``x``."""
@@ -126,11 +154,13 @@ class MinMaxFloorObserver(ObserverBase):
     '''
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
-                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False):
-        super(MinMaxFloorObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
-        '''
-        The quant_type could be 'input', 'param', 'tensor', the co-responding
-        range is 1, 5, 5,
+                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
+                 factory_kwargs=None):
+        super(MinMaxFloorObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                                  ch_axis, pot_scale, factory_kwargs)
+        '''
+        The quant_type could be 'input', 'param' or 'tensor'; the corresponding
+        range is 1, 5, 5,
         mth is 2, 3, 2
         '''
         self.quant_type = None
@@ -144,7 +174,7 @@ def forward(self, x_orig):
         if self.ch_axis == -1:
             min_val_cur, max_val_cur = torch._aminmax(x)
         else:
-            logger.warn('The per-tensor observer does not support per-channel min-max!') 
+            logger.warn('The per-tensor observer does not support per-channel min-max!')
             min_val_cur, max_val_cur = torch._aminmax(x)
 
         self.min_val = min_val_cur
@@ -156,10 +186,10 @@ def calculate_qparams(self):
         if self.quant_type is None:
             raise ValueError('You should set the observer type before forward!')
         else:
-            scale_range = 1 if self.quant_type == 'input' else 5 
-            mth = 3 if self.quant_type == 'param' else 2 
+            scale_range = 1 if self.quant_type == 'input' else 5
+            mth = 3 if self.quant_type == 'param' else 2
         scale, zero_point = self._calculate_qparams(self.min_val, self.max_val)
-        scale.data = scale.data * 0 + max(self.min_val / self.quant_min, self.max_val / self.quant_max) 
+        scale.data = scale.data * 0 + max(self.min_val / self.quant_min, self.max_val / self.quant_max)
         if scale < 2 ** -15:
             max_scale = 0
         else:
@@ -169,18 +199,18 @@ def calculate_qparams(self):
         final_scale = max_scale
         max_scale = int(max_scale)
         for s in range(max_scale, max_scale + scale_range):
-            _s = 1 / 2 ** s 
+            _s = 1 / 2 ** s
             if mth == 3:
                 new_x = _s * torch.clamp(torch.round(self._x / _s), self.quant_min, self.quant_max)
             elif mth == 2:
-                new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max) 
+                new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max)
                 new_x = torch.where((new_x < 0) & (new_x - new_x.floor() == 0.5), new_x.ceil(), new_x.round())
-                new_x *= _s 
+                new_x *= _s
             loss = ((new_x - self._x)**2).sum()
             min_loss = min_loss.to(loss.device)
             if loss < min_loss:
                 min_loss = loss
-                final_scale = s 
+                final_scale = s
         final_scale = min(final_scale, 12)
         scale = scale.data * 0 + 1 / (2 ** final_scale)
         zero_point = torch.zeros_like(zero_point)
@@ -200,9 +230,10 @@ class EMAMinMaxObserver(ObserverBase):
     """
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9):
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
+                 factory_kwargs=None):
         super(EMAMinMaxObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
-                                                ch_axis, pot_scale)
+                                                ch_axis, pot_scale, factory_kwargs)
         self.ema_ratio = ema_ratio
 
     def forward(self, x_orig):
@@ -235,9 +266,10 @@ class EMAMinMaxFloorObserver(ObserverBase):
     """
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9):
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
+                 factory_kwargs=None):
         super(EMAMinMaxFloorObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
-                                                     ch_axis, pot_scale)
+                                                     ch_axis, pot_scale, factory_kwargs)
         self.ema_ratio = ema_ratio
         self.quant_type = None
@@ -250,7 +282,7 @@ def forward(self, x_orig):
         if self.ch_axis == -1:
             min_val_cur, max_val_cur = torch._aminmax(x)
         else:
-            logger.warn('The per-tensor observer does not support per-channel min-max!') 
+            logger.warn('The per-tensor observer does not support per-channel min-max!')
             min_val_cur, max_val_cur = torch._aminmax(x)
 
         if self.max_val.numel() <= 1 and self.max_val.isinf():
@@ -265,8 +297,8 @@ def calculate_qparams(self):
         if self.quant_type is None:
             raise ValueError('You should set the observer type before forward!')
         else:
-            scale_range = 1 if self.quant_type == 'input' else 5 
-            mth = 3 if self.quant_type == 'param' else 2 
+            scale_range = 1 if self.quant_type == 'input' else 5
+            mth = 3 if self.quant_type == 'param' else 2
         scale, zero_point = self._calculate_qparams(self.min_val, self.max_val)
         scale.data = scale.data * 0 + max(self.min_val / self.quant_min, self.max_val / self.quant_max)
         if scale < 2 ** -15:
@@ -274,24 +306,24 @@ def calculate_qparams(self):
         else:
             max_scale = 1 / scale
             max_scale = torch.floor(max_scale.log2())
-        max_scale = 1 / scale 
+        max_scale = 1 / scale
         max_scale = torch.floor(max_scale.log2())
         min_loss = torch.tensor([float('inf')])
         final_scale = max_scale
         max_scale = int(max_scale)
         for s in range(max_scale, max_scale + scale_range):
-            _s = 1 / 2 ** s 
+            _s = 1 / 2 ** s
             if mth == 3:
                 new_x = _s * torch.clamp(torch.round(self._x / _s), self.quant_min, self.quant_max)
             elif mth == 2:
-                new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max) 
+                new_x = torch.clamp(self._x / _s, self.quant_min, self.quant_max)
                 new_x = torch.where((new_x < 0) & (new_x - new_x.floor() == 0.5), new_x.ceil(), new_x.round())
-                new_x *= _s 
+                new_x *= _s
             loss = ((new_x - self._x)**2).sum()
             min_loss = min_loss.to(loss.device)
             if loss < min_loss:
                 min_loss = loss
-                final_scale = s 
+                final_scale = s
         final_scale = min(final_scale, 12)
         scale = scale.data * 0 + 1 / (2 ** final_scale)
         zero_point = torch.zeros_like(zero_point)
@@ -310,10 +342,10 @@ class EMAQuantileObserver(ObserverBase):
     """
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9, threshold=0.99999,
-                 bins=2048):
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, ema_ratio=0.9,
+                 threshold=0.99999, bins=2048, factory_kwargs=None):
         super(EMAQuantileObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
-                                                  ch_axis, pot_scale)
+                                                  ch_axis, pot_scale, factory_kwargs)
         assert self.ch_axis == -1, "Quantile observer only support in per-tensor scheme."
         self.ema_ratio = ema_ratio
         self.threshold = threshold
@@ -347,8 +379,10 @@ class ClipStdObserver(ObserverBase):
     """
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, std_scale=2.6):
-        super(ClipStdObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, std_scale=2.6,
+                 factory_kwargs=None):
+        super(ClipStdObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                              ch_axis, pot_scale, factory_kwargs)
         self.std_scale = std_scale
 
     def forward(self, x_orig):
@@ -387,8 +421,9 @@ class LSQObserver(ObserverBase):
     '''
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False):
-        super(LSQObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, factory_kwargs=None):
+        super(LSQObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                          ch_axis, pot_scale, factory_kwargs)
         self.tensor_norm = None
 
     def forward(self, x_orig):
@@ -430,10 +465,10 @@ class LSQPlusObserver(ObserverBase):
     '''
 
     def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False,
-                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False):
+                 quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, factory_kwargs=None):
 
-        super(LSQPlusObserver, self).__init__(dtype, qscheme, reduce_range,
-                                              quant_min, quant_max, ch_axis, pot_scale)
+        super(LSQPlusObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                              ch_axis, pot_scale, factory_kwargs)
         self.mean = None
         self.std = None
@@ -479,49 +514,44 @@ class MSEObserver(ObserverBase):
     '''
 
    def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
-                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, p=2.0):
-        super(MSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
+                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, p=2.0,
+                 factory_kwargs=None):
+        super(MSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                          ch_axis, pot_scale, factory_kwargs)
         self.p = p
 
-    def quantize(self, x: torch.Tensor, scale: float, zero_point: float):
-        x_int = torch.round(x / scale)
-        x_quant = torch.clamp(x_int + zero_point, self.quant_min, self.quant_max)
-        x_float_q = (x_quant - zero_point) * scale
-        return x_float_q
-
-    def lp_loss(self, pred, tgt, p=2.0):
+    def lp_loss(self, pred, tgt):
         """
         loss function measured in L_p Norm
         """
         return (pred - tgt).abs().pow(self.p).mean()
 
-    def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor):
+    def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor, iter=80):
         best_score = 1e+10
-        best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.int)
+        best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.float)
         best_min.copy_(x_min)
         best_max.copy_(x_max)
-        for i in range(45):
+        for i in range(iter):
            new_min = x_min * (1.0 - (i * 0.01))
            new_max = x_max * (1.0 - (i * 0.01))
            scale, zero_point = self._calculate_qparams(new_min, new_max)
-            if self.pot_scale:
-                scale = pot_quantization(scale)
-            x_q = self.quantize(x, scale.data, zero_point.data)
+            x_q = torch.fake_quantize_per_tensor_affine(
+                x, scale.item(), int(zero_point.item()),
+                self.quant_min, self.quant_max)
             score = self.lp_loss(x_q, x)
             if score < best_score:
                 best_score = score
                 best_min, best_max = new_min, new_max
-
         return best_min, best_max
 
     def forward(self, x_orig):
         r"""Records the running minimum and maximum of ``x``."""
         if x_orig.numel() == 0:
             return x_orig
-        x = x_orig.to(self.min_val.dtype)
+        x = x_orig.clone().detach().to(self.min_val.dtype)
         if self.ch_axis == -1:
             min_val_cur, max_val_cur = torch._aminmax(x)
-            min_val_cur, max_val_cur = self.mse(x_orig.clone().detach(), min_val_cur, max_val_cur)
+            min_val_cur, max_val_cur = self.mse(x, min_val_cur, max_val_cur, iter=95)
         else:
             x_dim = x.size()
             new_axis_list = [i for i in range(len(x_dim))]
@@ -531,7 +561,7 @@ def forward(self, x_orig):
             y = torch.flatten(x_channel, start_dim=1)
             min_val_cur, max_val_cur = torch._aminmax(y, 1)
             for ch, val in enumerate(min_val_cur):
-                min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel, min_val_cur[ch], max_val_cur[ch])
+                min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel[ch], min_val_cur[ch], max_val_cur[ch], iter=80)
 
         self.min_val = torch.min(self.min_val, min_val_cur)
         self.max_val = torch.max(self.max_val, max_val_cur)
@@ -543,50 +573,45 @@ class EMAMSEObserver(ObserverBase):
     Calculate mseobserver of whole calibration dataset.
     '''
 
    def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
-                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False, p=2.0, ema_ratio=0.9):
-        super(EMAMSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max, ch_axis, pot_scale)
+                 reduce_range=False, quant_min=None, quant_max=None, ch_axis=-1, pot_scale=False,
+                 p=2.0, ema_ratio=0.9, factory_kwargs=None):
+        super(EMAMSEObserver, self).__init__(dtype, qscheme, reduce_range, quant_min, quant_max,
+                                             ch_axis, pot_scale, factory_kwargs)
         self.ema_ratio = ema_ratio
         self.p = p
 
-    def quantize(self, x: torch.Tensor, scale: float, zero_point: float):
-        x_int = torch.round(x / scale)
-        x_quant = torch.clamp(x_int + zero_point, self.quant_min, self.quant_max)
-        x_float_q = (x_quant - zero_point) * scale
-        return x_float_q
-
     def lp_loss(self, pred, tgt):
         """
         loss function measured in L_p Norm
         """
         return (pred - tgt).abs().pow(self.p).mean()
 
-    def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor):
+    def mse(self, x: torch.Tensor, x_min: torch.Tensor, x_max: torch.Tensor, iter=80):
         best_score = 1e+10
-        best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.int)
+        best_min, best_max = torch.tensor([1.0], dtype=torch.float), torch.tensor([1.0], dtype=torch.float)
         best_min.copy_(x_min)
         best_max.copy_(x_max)
-        for i in range(45):
+        for i in range(iter):
             new_min = x_min * (1.0 - (i * 0.01))
             new_max = x_max * (1.0 - (i * 0.01))
             scale, zero_point = self._calculate_qparams(new_min, new_max)
-            if self.pot_scale:
-                scale = pot_quantization(scale)
-            x_q = self.quantize(x, scale.data, zero_point.data)
+            x_q = torch.fake_quantize_per_tensor_affine(
+                x, scale.item(), int(zero_point.item()),
+                self.quant_min, self.quant_max)
             score = self.lp_loss(x_q, x)
             if score < best_score:
                 best_score = score
                 best_min, best_max = new_min, new_max
-
         return best_min, best_max
 
     def forward(self, x_orig):
         r"""Records the running minimum and maximum of ``x``."""
         if x_orig.numel() == 0:
            return x_orig
-        x = x_orig.to(self.min_val.dtype)
+        x = x_orig.clone().detach().to(self.min_val.dtype)
         if self.ch_axis == -1:
             min_val_cur, max_val_cur = torch._aminmax(x)
-            min_val_cur, max_val_cur = self.mse(x_orig.clone().detach(), min_val_cur, max_val_cur)
+            min_val_cur, max_val_cur = self.mse(x, min_val_cur, max_val_cur, iter=95)
         else:
             x_dim = x.size()
             new_axis_list = [i for i in range(len(x_dim))]
@@ -596,8 +621,8 @@ def forward(self, x_orig):
             y = torch.flatten(x_channel, start_dim=1)
             min_val_cur, max_val_cur = torch._aminmax(y, 1)
             for ch, val in enumerate(min_val_cur):
-                min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel[ch].clone().detach(), min_val_cur[ch],
-                                                            max_val_cur[ch])
+                min_val_cur[ch], max_val_cur[ch] = self.mse(x_channel[ch], min_val_cur[ch],
+                                                            max_val_cur[ch], iter=80)
 
         if self.max_val.numel() <= 1 and self.max_val.isinf():
             self.min_val = min_val_cur
diff --git a/mqbench/prepare_by_platform.py b/mqbench/prepare_by_platform.py
index 47c8835..6d67059 100644
--- a/mqbench/prepare_by_platform.py
+++ b/mqbench/prepare_by_platform.py
@@ -2,7 +2,8 @@
 from typing import Any, Dict
 
 import torch
-from torch.fx.symbolic_trace import symbolic_trace
+from torch.fx import Tracer
+from torch.fx.graph_module import GraphModule
 from torch.quantization.quantize_fx import _swap_ff_with_fxff
 from torch.quantization import QConfig
 
@@ -75,13 +76,13 @@ class BackendType(Enum):
                                  default_act_quantize=LearnableFakeQuantize,
                                  default_weight_observer=MinMaxObserver,
                                  default_act_observer=EMAMinMaxObserver),
-    BackendType.Vitis: dict(qtype='vitis',     # noqa: E241
-                            w_qscheme=QuantizeScheme(symmetry=True, per_channel=False, pot_scale=True, bit=8),
-                            a_qscheme=QuantizeScheme(symmetry=True, per_channel=False, pot_scale=True, bit=8),
-                            default_weight_quantize=TqtFakeQuantize,
-                            default_act_quantize=TqtFakeQuantize,
-                            default_weight_observer=MinMaxFloorObserver,
-                            default_act_observer=EMAMinMaxFloorObserver),
+    BackendType.Vitis: dict(qtype='vitis',  # noqa: E241
+                            w_qscheme=QuantizeScheme(symmetry=True, per_channel=False, pot_scale=True, bit=8),
+                            a_qscheme=QuantizeScheme(symmetry=True, per_channel=False, pot_scale=True, bit=8),
+                            default_weight_quantize=TqtFakeQuantize,
+                            default_act_quantize=TqtFakeQuantize,
+                            default_weight_observer=MinMaxFloorObserver,
+                            default_act_observer=EMAMinMaxFloorObserver),
     BackendType.ONNX_QNN: dict(qtype='affine',     # noqa: E241
                                w_qscheme=QuantizeScheme(symmetry=False, per_channel=False, pot_scale=False, bit=8),
                                a_qscheme=QuantizeScheme(symmetry=False, per_channel=False, pot_scale=False, bit=8),
@@ -221,6 +222,39 @@ def get_qconfig_by_platform(deploy_backend: BackendType, extra_qparams: Dict):
     return QConfig(activation=a_qconfig, weight=w_qconfig)
 
 
+class CustomedTracer(Tracer):
+    """
+    ``Tracer`` is the class that implements the symbolic tracing functionality
+    of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
+    to ``Tracer().trace(m)``.
+    This Tracer overrides the ``is_leaf_module`` method so that symbolic
+    tracing is correct in some cases.
+    """
+    def __init__(self, *args, customed_leaf_module=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.customed_leaf_module = customed_leaf_module
+
+    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
+        """
+        A method to specify whether a given ``nn.Module`` is a "leaf" module.
+        Leaf modules are the atomic units that appear in
+        the IR, referenced by ``call_module`` calls. By default,
+        Modules in the PyTorch standard library namespace (torch.nn)
+        are leaf modules. All other modules are traced through and
+        their constituent ops are recorded, unless specified otherwise
+        via this parameter.
+        Args:
+            m (Module): The module being queried about
+            module_qualified_name (str): The path to root of this module. For example,
+                if you have a module hierarchy where submodule ``foo`` contains
+                submodule ``bar``, which contains submodule ``baz``, that module will
+                appear with the qualified name ``foo.bar.baz`` here.
+        """
+        if self.customed_leaf_module and isinstance(m, self.customed_leaf_module):
+            return True
+        return m.__module__.startswith('torch.nn') and not isinstance(m, torch.nn.Sequential)
+
+
 def prepare_by_platform(
         model: torch.nn.Module,
         deploy_backend: BackendType,
@@ -264,7 +298,11 @@ def prepare_by_platform(
                 preserve_attr_dict[submodule_name][attr] = getattr(cur_module, attr)
     # Symbolic trace
     concrete_args = prepare_custom_config_dict.get('concrete_args', None)
-    graph_module = symbolic_trace(model, concrete_args=concrete_args)
+    customed_leaf_module = prepare_custom_config_dict.get('leaf_module', {})
+    tracer = CustomedTracer(customed_leaf_module=customed_leaf_module)
+    graph = tracer.trace(model, concrete_args)
+    name = model.__class__.__name__ if isinstance(model, torch.nn.Module) else model.__name__
+    graph_module = GraphModule(tracer.root, graph, name)
     # Model fusion.
     extra_fuse_dict = prepare_custom_config_dict.get('extra_fuse_dict', {})
     extra_fuse_dict.update(fuse_custom_config_dict)
diff --git a/mqbench/quantization/__init__.py b/mqbench/quantization/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mqbench/tools/__init__.py b/mqbench/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mqbench/utils/state.py b/mqbench/utils/state.py
index 332703d..2f41161 100644
--- a/mqbench/utils/state.py
+++ b/mqbench/utils/state.py
@@ -11,6 +11,29 @@ def enable_calibration(model):
             submodule.enable_observer()
             submodule.disable_fake_quant()
 
+def enable_calibration_woquantization(model, quantizer_type='fake_quant'):
+    logger.info('Enable observer and Disable quantize for {}'.format(quantizer_type))
+    for name, submodule in model.named_modules():
+        if isinstance(submodule, torch.quantization.FakeQuantizeBase):
+            if quantizer_type not in name:
+                submodule.disable_observer()
+                submodule.disable_fake_quant()
+                continue
+            logger.debug('Enable observer and Disable quant: {}'.format(name))
+            submodule.enable_observer()
+            submodule.disable_fake_quant()
+
+def enable_calibration_quantization(model, quantizer_type='fake_quant'):
+    logger.info('Enable observer and Enable quantize for {}'.format(quantizer_type))
+    for name, submodule in model.named_modules():
+        if isinstance(submodule, torch.quantization.FakeQuantizeBase):
+            if quantizer_type not in name:
+                submodule.disable_observer()
+                submodule.disable_fake_quant()
+                continue
+            logger.debug('Enable observer and Enable quant: {}'.format(name))
+            submodule.enable_observer()
+            submodule.enable_fake_quant()
 
 def enable_quantization(model):
     logger.info('Disable observer and Enable quantize.')
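Note: taken together, the custom tracer and the new calibration helpers are driven roughly as follows. `MyModel`, `NonTraceableBlock`, and `calib_loader` are placeholder names; only the 'leaf_module' config key, the `quantizer_type` default, and the imported functions come from the hunks above:

    import torch
    from mqbench.prepare_by_platform import prepare_by_platform, BackendType
    from mqbench.utils.state import enable_calibration_woquantization, enable_quantization

    model = MyModel().eval()
    # Classes listed under 'leaf_module' are kept atomic by CustomedTracer
    # instead of being traced through.
    qmodel = prepare_by_platform(model, BackendType.Tensorrt,
                                 {'leaf_module': (NonTraceableBlock,)})

    # Calibration pass: observers collect ranges while fake-quant stays off.
    enable_calibration_woquantization(qmodel, quantizer_type='fake_quant')
    with torch.no_grad():
        for images, _ in calib_loader:
            qmodel(images)

    # Switch to fake-quantized inference for evaluation or deployment.
    enable_quantization(qmodel)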