From 2cc399fa19f4d429e018dbefc6e6e3da63120f0f Mon Sep 17 00:00:00 2001
From: Mohamed Naas
Date: Wed, 20 Jul 2022 16:52:24 +0200
Subject: [PATCH 1/2] fix typo in to_onnx.py

---
 converters/to_onnx.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/converters/to_onnx.py b/converters/to_onnx.py
index 15b71a0..41b176a 100644
--- a/converters/to_onnx.py
+++ b/converters/to_onnx.py
@@ -98,10 +98,18 @@ def main() -> None:
         required=True,
     )
 
+    arg(
+        "-i",
+        "--image_path",
+        type=str,
+        help="The path of the input image",
+        required=True,
+    )
+
     arg("-o", "--output_file", type=str, help="Path to save onnx model.", required=True)
 
     args = parser.parse_args()
 
-    raw_image = cv2.imread("tests/data/13.jpg")
+    raw_image = cv2.imread(args.image_path)
 
     image = prepare_image(raw_image, args.max_size)
 
@@ -145,7 +153,7 @@ def main() -> None:
             }
         ]
 
-    im = albu.Compose([albu.LongestMaxSize(max_size=1280)])(image=raw_image)["image"]
+    im = albu.Compose([albu.LongestMaxSize(max_size=args.max_size)])(image=raw_image)["image"]
 
     cv2.imwrite("example.jpg", vis_annotations(im, annotations))
 

From c977fc9db913af7ca3d95a1536be4b06b67cc8fd Mon Sep 17 00:00:00 2001
From: Sourcery AI <>
Date: Wed, 20 Jul 2022 14:55:06 +0000
Subject: [PATCH 2/2] 'Refactored by Sourcery'

---
 converters/to_onnx.py | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/converters/to_onnx.py b/converters/to_onnx.py
index 41b176a..b8268ba 100644
--- a/converters/to_onnx.py
+++ b/converters/to_onnx.py
@@ -142,16 +142,15 @@ def main() -> None:
         if not np.allclose(out_torch[i].numpy(), outputs[i]):
             raise ValueError("torch and onnx models do not match!")
-    annotations: List[Dict[str, List[Union[float, List[float]]]]] = []
-
-    for box_id, box in enumerate(outputs[0]):
-        annotations += [
-            {
-                "bbox": box.tolist(),
-                "score": outputs[1][box_id],
-                "landmarks": outputs[2][box_id].reshape(-1, 2).tolist(),
-            }
-        ]
+    annotations: List[Dict[str, List[Union[float, List[float]]]]] = [
+        {
+            "bbox": box.tolist(),
+            "score": outputs[1][box_id],
+            "landmarks": outputs[2][box_id].reshape(-1, 2).tolist(),
+        }
+        for box_id, box in enumerate(outputs[0])
+    ]
+
 
     im = albu.Compose([albu.LongestMaxSize(max_size=args.max_size)])(image=raw_image)["image"]
 
     cv2.imwrite("example.jpg", vis_annotations(im, annotations))