diff --git a/converters/to_onnx.py b/converters/to_onnx.py
index 15b71a0..b8268ba 100644
--- a/converters/to_onnx.py
+++ b/converters/to_onnx.py
@@ -98,10 +98,18 @@ def main() -> None:
         required=True,
     )
 
+    arg(
+        "-i",
+        "--image_path",
+        type=str,
+        help="Path to the input image.",
+        required=True,
+    )
+
     arg("-o", "--output_file", type=str, help="Path to save onnx model.", required=True)
     args = parser.parse_args()
 
-    raw_image = cv2.imread("tests/data/13.jpg")
+    raw_image = cv2.imread(args.image_path)
 
     image = prepare_image(raw_image, args.max_size)
 
@@ -134,18 +142,17 @@ def main() -> None:
     if not np.allclose(out_torch[i].numpy(), outputs[i]):
         raise ValueError("torch and onnx models do not match!")
 
-    annotations: List[Dict[str, List[Union[float, List[float]]]]] = []
+    annotations: List[Dict[str, List[Union[float, List[float]]]]] = [
+        {
+            "bbox": box.tolist(),
+            "score": outputs[1][box_id],
+            "landmarks": outputs[2][box_id].reshape(-1, 2).tolist(),
+        }
+        for box_id, box in enumerate(outputs[0])
+    ]
 
-    for box_id, box in enumerate(outputs[0]):
-        annotations += [
-            {
-                "bbox": box.tolist(),
-                "score": outputs[1][box_id],
-                "landmarks": outputs[2][box_id].reshape(-1, 2).tolist(),
-            }
-        ]
 
-    im = albu.Compose([albu.LongestMaxSize(max_size=1280)])(image=raw_image)["image"]
+    im = albu.Compose([albu.LongestMaxSize(max_size=args.max_size)])(image=raw_image)["image"]
     cv2.imwrite("example.jpg", vis_annotations(im, annotations))
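
Usage sketch, not part of the patch: after this change the input image must be passed explicitly rather than read from the hardcoded test path. Assuming the script's other flags keep their current spelling (--max_size is referenced in the code above; retinaface.onnx is a placeholder output name), an invocation could look like:

    python converters/to_onnx.py -i tests/data/13.jpg -o retinaface.onnx --max_size 1280

Note that cv2.imread returns None rather than raising on an unreadable path, so a bad --image_path value will only surface downstream in prepare_image.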