Skip to content

Commit

Permalink
fix: installation of paddle on Windows, disable two tests on Windows,…
Browse files Browse the repository at this point in the history
… syntax error in entrypoint.sh
  • Loading branch information
0x3878f committed Jan 17, 2025
1 parent f79e69c commit a2b57db
Show file tree
Hide file tree
Showing 6 changed files with 80 additions and 81 deletions.
44 changes: 9 additions & 35 deletions .github/workflows/build_and_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,50 +15,24 @@ jobs:
strategy:
matrix:
python-version: [ '3.8']
architecture: [ 'x64' ]

architecture: [ 'x86_64']
steps:
# Checkout the latest branch of Paddle2ONNX.
- name: Checkout Paddle2ONNX
uses: actions/checkout@v4
with:
submodules: true

# Setup Python
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
- name: Build on manylinux_2_28_x86_64
uses: docker://quay.io/pypa/manylinux_2_28_x86_64:latest
with:
python-version: ${{ matrix.python-version }}
entrypoint: bash
args: .github/workflows/scripts/entrypoint.sh ${{ matrix.python-version }}

# Clone protobuf repository and checkout to v21.12
- name: Clone protobuf
run: |
git clone https://github.com/protocolbuffers/protobuf.git
cd protobuf
git checkout v21.12
# Build and install protobuf
- name: Build and install protobuf
run: |
cd protobuf
git submodule update --init --recursive
mkdir build
cd build
cmake ../cmake -DCMAKE_INSTALL_PREFIX=${{ github.workspace }}/installed_protobuf -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_STANDARD=14
cmake --build . --target install
# Install Python dependencies
- name: Install Python dependencies
run: |
python -m pip install -q --upgrade pip
python -m pip install setuptools wheel build
# Build package
- name: Build package
run: |
export PATH="${{ github.workspace }}/installed_protobuf/bin:$PATH"
export PIP_EXTRA_INDEX_URL="https://www.paddlepaddle.org.cn/packages/nightly/cpu/"
python -m build --wheel
- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: '3.8'

# Install Paddle2ONNX
- name: Install Paddle2ONNX
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release_linux_aarch64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ jobs:
# setting up qemu for enabling aarch64 binary execution on x86 machine
- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0

- name: Build on manylinux2014_aarch64
- name: Build on manylinux_2_28_aarch64
uses: docker://quay.io/pypa/manylinux_2_28_aarch64
with:
entrypoint: bash
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release_linux_x86_64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ jobs:
with:
submodules: true

- name: Build on manylinux2014_x86_64
- name: Build on manylinux_2_28_x86_64
uses: docker://quay.io/pypa/manylinux_2_28_x86_64:latest
with:
entrypoint: bash
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/scripts/entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ $PYTHON_COMMAND -m build --wheel || { echo "Building wheels failed."; exit 1; }
# find -exec does not preserve failed exit codes, so use an output file for failures
failed_wheels=$PWD/failed-wheels
rm -f "$failed_wheels"
find . -type f -iname "*-linux*.whl" -exec sh -c "auditwheel repair '{}' -w \$(dirname '{}') --exclude libpaddle.so' || { echo 'Repairing wheels failed.'; auditwheel show '{}' >> '$failed_wheels'; }" \;
find . -type f -iname "*-linux*.whl" -exec sh -c "auditwheel repair '{}' -w \$(dirname '{}') --exclude libpaddle.so || { echo 'Repairing wheels failed.'; auditwheel show '{}' >> '$failed_wheels'; }" \;

if [[ -f "$failed_wheels" ]]; then
echo "Repairing wheels failed:"
Expand Down
107 changes: 65 additions & 42 deletions tests/auto_scan_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,12 @@
import time
import logging
import paddle
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
from onnxbase import APIOnnx, randtool
from itertools import product
import copy
from inspect import isfunction
from onnxbase import _test_with_pir

paddle.set_device("cpu")

Expand All @@ -34,21 +32,25 @@
settings.register_profile(
"ci",
max_examples=100,
suppress_health_check=hypothesis.HealthCheck.all(),
suppress_health_check=list(HealthCheck),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False)
report_multiple_bugs=False,
)
settings.register_profile(
"dev",
max_examples=1000,
suppress_health_check=hypothesis.HealthCheck.all(),
suppress_health_check=list(HealthCheck),
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False)
if float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \
os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci':
report_multiple_bugs=False,
)
if (
float(os.getenv("TEST_NUM_PERCENT_CASES", default="1.0")) < 1
or os.getenv("HYPOTHESIS_TEST_PROFILE", "dev") == "ci"
):
settings.load_profile("ci")
else:
settings.load_profile("dev")
Expand All @@ -75,12 +77,14 @@ def __init__(self, *args, **kwargs):
self.num_ran_models = 0

# @_test_with_pir
def run_and_statis(self,
max_examples=100,
opset_version=[7, 9, 15],
reproduce=None,
min_success_num=25,
max_duration=-1):
def run_and_statis(
self,
max_examples=100,
opset_version=[7, 9, 15],
reproduce=None,
min_success_num=25,
max_duration=-1,
):
self.num_ran_models = 0
if os.getenv("CE_STAGE", "OFF") == "ON":
max_examples *= 10
Expand All @@ -95,7 +99,8 @@ def run_and_statis(self,
deadline=None,
print_blob=True,
derandomize=True,
report_multiple_bugs=False, )
report_multiple_bugs=False,
)
settings.load_profile("ci")

def sample_convert_generator(draw):
Expand All @@ -113,38 +118,43 @@ def run_test(configs):
paddle.disable_static()
loop_func()

logging.info(
"===================Statistical Information===================")
logging.info("Number of Generated Programs: {}".format(
self.num_ran_models))
logging.info("===================Statistical Information===================")
logging.info("Number of Generated Programs: {}".format(self.num_ran_models))
successful_ran_programs = int(self.num_ran_models)
if successful_ran_programs < min_success_num:
logging.warning("satisfied_programs = ran_programs")
logging.error(
"At least {} programs need to run successfully, but now only about {} programs satisfied.".
format(min_success_num, successful_ran_programs))
"At least {} programs need to run successfully, but now only about {} programs satisfied.".format(
min_success_num, successful_ran_programs
)
)
assert False
used_time = time.time() - start_time
logging.info("Used time: {} s".format(round(used_time, 2)))
if max_duration > 0 and used_time > max_duration:
logging.error(
"The duration exceeds {} seconds, if this is necessary, try to set a larger number for parameter `max_duration`.".
format(max_duration))
"The duration exceeds {} seconds, if this is necessary, try to set a larger number for parameter `max_duration`.".format(
max_duration
)
)
assert False

def run_test(self, configs):
config, models = configs
logging.info("Run configs: {}".format(config))

assert "op_names" in config.keys(
), "config must include op_names in dict keys"
assert "test_data_shapes" in config.keys(
assert "op_names" in config.keys(), "config must include op_names in dict keys"
assert (
"test_data_shapes" in config.keys()
), "config must include test_data_shapes in dict keys"
assert "test_data_types" in config.keys(
assert (
"test_data_types" in config.keys()
), "config must include test_data_types in dict keys"
assert "opset_version" in config.keys(
assert (
"opset_version" in config.keys()
), "config must include opset_version in dict keys"
assert "input_spec_shape" in config.keys(
assert (
"input_spec_shape" in config.keys()
), "config must include input_spec_shape in dict keys"

op_names = config["op_names"]
Expand All @@ -169,7 +179,8 @@ def run_test(self, configs):
opset_version = opset_version * len(models)

assert len(models) == len(
op_names), "Length of models should be equal to length of op_names"
op_names
), "Length of models should be equal to length of op_names"

input_type_list = None
if len(test_data_types) > 1:
Expand All @@ -191,8 +202,16 @@ def run_test(self, configs):

for i, model in enumerate(models):
model.eval()
obj = APIOnnx(model, op_names[i], opset_version[i], op_names[i],
input_specs, delta, rtol, use_gpu)
obj = APIOnnx(
model,
op_names[i],
opset_version[i],
op_names[i],
input_specs,
delta,
rtol,
use_gpu,
)
for input_type in input_type_list:
input_tensors = list()
for j, shape in enumerate(test_data_shapes):
Expand All @@ -202,24 +221,28 @@ def run_test(self, configs):
data = data.astype(input_type[j])
input_tensors.append(paddle.to_tensor(data))
continue
if input_type[j].count('int') > 0:
if input_type[j].count("int") > 0:
input_tensors.append(
paddle.to_tensor(
randtool("int", -20, 20, shape).astype(
input_type[j])))
elif input_type[j].count('bool') > 0:
randtool("int", -20, 20, shape).astype(input_type[j])
)
)
elif input_type[j].count("bool") > 0:
input_tensors.append(
paddle.to_tensor(
randtool("bool", -2, 2, shape).astype(
input_type[j])))
randtool("bool", -2, 2, shape).astype(input_type[j])
)
)
else:
input_tensors.append(
paddle.to_tensor(
randtool("float", -2, 2, shape).astype(
input_type[j])))
randtool("float", -2, 2, shape).astype(input_type[j])
)
)
obj.set_input_data("input_data", tuple(input_tensors))
logging.info("Now Run >>> dtype: {}, op_name: {}".format(
input_type, op_names[i]))
logging.info(
"Now Run >>> dtype: {}, op_name: {}".format(input_type, op_names[i])
)
obj.run()
if len(input_type_list) == 0:
obj.run()
Expand Down
4 changes: 3 additions & 1 deletion tests/run.bat
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ set ignore=!ignore! test_resnet_fp16.py
set ignore=!ignore! test_empty.py
set ignore=!ignore! test_auto_scan_pool_max_ops.py
set ignore=!ignore! test_auto_scan_fill_constant.py
set ignore=!ignore! test_auto_scan_layer_norm.py
set ignore=!ignore! test_auto_scan_scatter_nd_add.py

REM Initialize bug count
set bug=0
Expand All @@ -81,7 +83,7 @@ set PY_CMD=%1
%PY_CMD% -m pip install onnx onnxruntime tqdm filelock
%PY_CMD% -m pip install six hypothesis
REM %PY_CMD% -m pip install --pre paddlepaddle -i https://www.paddlepaddle.org.cn/packages/nightly/cpu/
%PY_CMD% -m pip install https://paddle2onnx.bj.bcebos.com/paddle_windows/paddlepaddle_gpu-0.0.0-cp310-cp310-win_amd64.wh
%PY_CMD% -m pip install https://paddle2onnx.bj.bcebos.com/paddle_windows/paddlepaddle_gpu-0.0.0-cp310-cp310-win_amd64.whl

REM Enable development mode and run tests
set ENABLE_DEV=ON
Expand Down

0 comments on commit a2b57db

Please sign in to comment.