Commit abde96e (1 parent: 9dca283) by Aidan Boyd, committed Dec 21, 2022. Showing 29 changed files with 5,833 additions and 0 deletions.
@@ -1 +1,50 @@
# Official repo for WACV 2023 xIA workshop PBM paper

## This is the command-line interface for the patch-based matching of two iris images.

All code was written to run on a GPU, but if none is available it should run fine on the CPU.

## Download the trained model

The model can be downloaded here:
https://drive.google.com/file/d/1w5nEesvF--j9nYslPOHnDKTwcIk4WqS1/view?usp=sharing

Place the model (named wacv_model.h5) in the ./Model/ folder such that the final path is ./Model/wacv_model.h5.
## Creating the environment

To create the conda environment to run this code, run the following commands:
````
conda env create -f environment.yml
# OR
conda create --name pbm --file requirements.txt
conda activate pbm
````
This setup targets Linux and has not been tested on Apple silicon.
## Preparing the data

The code currently assumes that the images have already been segmented and cropped to 256x256 px. Segmentation masks must be cropped to the same size as the images. Images and masks must have the same filenames and be placed in separate folders, e.g. ./workdir/input/images/ and ./workdir/input/masks/. A quick layout check is sketched below.
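The following is a minimal sketch (not part of the repository) of how this layout could be verified before running the pipeline; the folder paths follow the example above and the 256x256 requirement comes from the paragraph above:

````
import os
import cv2 as cv

# Hypothetical pre-flight check (not part of this repo): every cropped image must
# have a mask with the same filename, and both must be 256x256 px.
image_dir = "./workdir/input/images/"
mask_dir = "./workdir/input/masks/"

for name in sorted(os.listdir(image_dir)):
    image = cv.imread(os.path.join(image_dir, name))
    mask_path = os.path.join(mask_dir, name)
    assert os.path.exists(mask_path), f"missing mask for {name}"
    mask = cv.imread(mask_path, cv.IMREAD_GRAYSCALE)
    assert image.shape[:2] == (256, 256), f"{name}: {image.shape[:2]}, expected (256, 256)"
    assert mask.shape == (256, 256), f"mask for {name}: {mask.shape}, expected (256, 256)"
````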
Examples of a segmented and cropped image and mask:



For matching, a file must be created that lists which images are to be matched. It must follow the same format as the example text file ./example_pairs.txt.
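For reference, each line of ./example_pairs.txt names the two images to be compared, comma-separated with no header:

````
9062R_1_3.png,9062R_1_1.png
9015L_1_3.png,9015L_1_2.png
````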
## Running the code

To run the program, you need to specify the path to the matching-pairs file, the location of the images, the location of the masks, and where to save the output scorefile. Example:

````
python pipeline_from_file.py --textfile ./example_pairs.txt --cropped_image_path ./workdir/input/images/ --cropped_mask_path ./workdir/input/masks/ --scorefile ./example_scores.txt
````

pipeline_from_file.py should run with its default parameters, but we suggest that users adjust them to their own needs. You should not need to change the model path; please use wacv_model.h5.

By default, the output visualizations are saved in ./workdir/patch-based/output/, but this can be changed using the --destination flag.
## Output scores

The generated scorefile contains four columns: the probe image, the gallery image, whether the pair is genuine (0 for different eyes, 1 for a genuine pair), and the distance measure, which can be used for plotting.
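As a rough illustration (not part of the repository), the scorefile could be loaded and the genuine/impostor distance distributions plotted as below; the column order comes from the description above, while the comma delimiter and the absence of a header row are assumptions about the file format:

````
import pandas as pd
import matplotlib.pyplot as plt

# Assumed layout: probe, gallery, genuine flag (1 = genuine, 0 = different eyes), distance.
# The comma delimiter and missing header row are assumptions about the scorefile format.
scores = pd.read_csv("./example_scores.txt",
                     names=["probe", "gallery", "genuine", "distance"])

genuine = scores.loc[scores["genuine"] == 1, "distance"]
impostor = scores.loc[scores["genuine"] == 0, "distance"]

plt.hist(genuine, bins=30, alpha=0.5, label="genuine pairs")
plt.hist(impostor, bins=30, alpha=0.5, label="different eyes")
plt.xlabel("distance")
plt.ylabel("count")
plt.legend()
plt.savefig("score_distributions.png")
````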
@@ -0,0 +1,62 @@
import os
import sys
import json
import datetime
import numpy as np
from tqdm import tqdm
import joblib
# Import Mask RCNN
from mrcnn.config import Config
from mrcnn import model as modellib, utils


############################################################
# Configurations
############################################################


class DetectorConfig(Config):
    # Mask R-CNN configuration for detecting iris features in the
    # 256x256 cropped iris images described in the README.
    BACKBONE = "resnet50"
    BATCH_SIZE = 8
    DETECTION_MAX_INSTANCES = 100
    DETECTION_MIN_CONFIDENCE = 0
    DETECTION_NMS_THRESHOLD = 0.3
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8
    IMAGE_CHANNEL_COUNT = 3
    IMAGE_MAX_DIM = 256
    IMAGE_MIN_DIM = 256
    IMAGE_MIN_SCALE = 0
    LEARNING_MOMENTUM = 0.9
    LEARNING_RATE = 0.001
    LOSS_WEIGHTS = {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 3.0}
    MASK_POOL_SIZE = 14
    MASK_SHAPE = [28, 28]
    MAX_GT_INSTANCES = 40
    MEAN_PIXEL = [50., 50., 50.]  # per-channel mean subtracted from input images
    NAME = "iris_feature_finetuned"
    NUM_CLASSES = 2  # background + iris_feature
    POST_NMS_ROIS_INFERENCE = 1000
    POST_NMS_ROIS_TRAINING = 2000
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    RPN_NMS_THRESHOLD = 0.9
    RPN_TRAIN_ANCHORS_PER_IMAGE = 256
    STEPS_PER_EPOCH = 500
    TRAIN_ROIS_PER_IMAGE = 256
    USE_MINI_MASK = False
    USE_RPN_ROIS = True
    WEIGHT_DECAY = 0.01


############################################################
# Dataset
############################################################

class DetectorDataset(utils.Dataset):
    def load_detector(self):
        # Add classes. We have only one class to add.
        self.add_class("iris_feature", 1, "iris_feature")
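A minimal inference sketch (an assumption, not shown in this commit) of how DetectorConfig could be combined with the Matterport Mask R-CNN API imported above; it assumes wacv_model.h5 from the README holds the Mask R-CNN weights, and it overrides the batch settings because model.detect() processes exactly GPU_COUNT * IMAGES_PER_GPU images per call:

````
from skimage import color, io
from mrcnn import model as modellib


class InferenceConfig(DetectorConfig):
    # detect() expects GPU_COUNT * IMAGES_PER_GPU images per call,
    # so run inference one image at a time.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", config=config, model_dir="./logs")
model.load_weights("./Model/wacv_model.h5", by_name=True)  # weights path from the README

# One of the cropped example images named in example_pairs.txt
image = io.imread("./workdir/input/images/9015L_1_2.png")
if image.ndim == 2:
    image = color.gray2rgb(image)  # the config expects 3-channel input

results = model.detect([image], verbose=0)
r = results[0]  # dict with "rois", "masks", "class_ids", and "scores"
````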
@@ -0,0 +1,106 @@
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from matplotlib import cm

# Opacity used when overlaying feature masks on an image
alpha = 0.2


def draw_features_full(img1, feature_masks):
    # Overlay each binary feature mask on the image as a translucent cyan region
    for mask in feature_masks:
        # print(mask.shape)
        r_channel = mask.copy()
        r_channel[mask != 0] = 0
        g_channel = mask.copy()
        g_channel[mask != 0] = 255
        b_channel = mask.copy()
        b_channel[mask != 0] = 255
        mask = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
        mask[:, :, 0] = r_channel
        mask[:, :, 1] = g_channel
        mask[:, :, 2] = b_channel

        cv.addWeighted(mask, alpha, img1, 1, 0, img1)
    return img1


def draw_features(img1, feature_masks):
    # Draw only the outer contour of each feature mask on the image
    for mask in feature_masks:
        cont = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        cv.drawContours(img1, cont[0], -1, (0, 255, 255))

        # # print(mask.shape)
        # r_channel = mask.copy()
        # r_channel[mask != 0] = 0
        # g_channel = mask.copy()
        # g_channel[mask != 0] = 255
        # b_channel = mask.copy()
        # b_channel[mask != 0] = 255
        # mask = cv.cvtColor(mask,cv.COLOR_GRAY2RGB)
        # mask[:,:,0] = r_channel
        # mask[:,:,1] = g_channel
        # mask[:,:,2] = b_channel

        # cv.addWeighted(mask, alpha, img1, 1,0, img1)
    return img1


def draw_features_bsif(img1, feature_masks):
    # Keep only the pixels covered by the feature masks and outline them in orange
    zeros = np.zeros((img1.shape))
    for mask in feature_masks:
        zeros[mask == 255, :] = img1[mask == 255, :]

        cont = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        cv.drawContours(zeros, cont[0], -1, (255, 162, 0), 2)

        # # print(mask.shape)
        # r_channel = mask.copy()
        # r_channel[mask != 0] = 0
        # g_channel = mask.copy()
        # g_channel[mask != 0] = 255
        # b_channel = mask.copy()
        # b_channel[mask != 0] = 255
        # mask = cv.cvtColor(mask,cv.COLOR_GRAY2RGB)
        # mask[:,:,0] = r_channel
        # mask[:,:,1] = g_channel
        # mask[:,:,2] = b_channel

        # cv.addWeighted(mask, alpha, img1, 1,0, img1)
    return zeros, img1


def display_matching(probe, gallery, matches, feature_masks, matching_score, classification, original_probe, original_gallery):
    # Outline the detected features on the probe and gallery images
    img1 = probe
    img2 = gallery

    img1 = draw_features(img1, feature_masks["probe"])
    img2 = draw_features(img2, feature_masks["gallery"])

    # Place probe and gallery side by side and draw a red line for every matched
    # patch pair; the gallery x-coordinates are shifted by the 256 px image width.
    comb_image = np.concatenate((img1, img2), axis=1)
    # overlay = comb_image.copy()
    for coord_pair in matches:
        # print(coord_pair)
        coord_1 = coord_pair[0]
        coord_2 = coord_pair[1]

        line_thickness = 2
        # cv.line(overlay, (coord_1[0], coord_1[1]), (coord_2[0]+256, coord_2[1]), (0, 0, 255), thickness=line_thickness)
        cv.line(comb_image, (coord_1[0], coord_1[1]), (coord_2[0] + 256, coord_2[1]), (0, 0, 255), thickness=line_thickness)

    # cv.addWeighted(overlay, 0.6, comb_image, 1,0, comb_image)
    comb_original = np.concatenate((original_probe, original_gallery), axis=1)

    # Stack the untouched originals above the annotated pair
    comb_both = np.concatenate((comb_original, comb_image), axis=0)
    # comb_both = comb_image

    plt.title("Matching Score: " + str(matching_score) + ", Classification: " + classification)
    frame1 = plt.gca()
    frame1.axes.get_xaxis().set_visible(False)
    frame1.axes.get_yaxis().set_visible(False)
    frame1.imshow(comb_both)
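A hedged usage sketch for display_matching (not part of this commit); the module's file name is not shown in the diff, so the functions above are assumed to be in scope, the image names come from example_pairs.txt, and the masks, matches, score, and label are dummy values that only illustrate the expected argument types:

````
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

# 256x256 cropped iris images (filenames taken from example_pairs.txt)
probe = cv.imread("./workdir/input/images/9015L_1_3.png")
gallery = cv.imread("./workdir/input/images/9015L_1_2.png")

# Dummy inputs: binary uint8 feature masks (0/255) per image, and one matched
# patch pair given as ((probe_x, probe_y), (gallery_x, gallery_y)).
mask = np.zeros((256, 256), dtype=np.uint8)
cv.circle(mask, (128, 128), 30, 255, -1)
feature_masks = {"probe": [mask.copy()], "gallery": [mask.copy()]}
matches = [((100, 120), (110, 118))]

display_matching(probe.copy(), gallery.copy(), matches, feature_masks,
                 0.42, "genuine", probe, gallery)  # score and label are placeholders
plt.savefig("example_match_visualization.png")
````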
@@ -0,0 +1,125 @@
name: pbm
channels:
  - defaults
dependencies:
  - _libgcc_mutex=0.1=main
  - _tflow_select=2.1.0=gpu
  - absl-py=0.11.0=pyhd3eb1b0_1
  - astor=0.8.1=py37_0
  - astropy=4.2=py37h27cfd23_0
  - blas=1.0=mkl
  - blosc=1.20.1=hd408876_0
  - brotli=1.0.9=he6710b0_2
  - bzip2=1.0.8=h7b6447c_0
  - c-ares=1.17.1=h27cfd23_0
  - ca-certificates=2021.1.19=h06a4308_0
  - certifi=2020.12.5=py37h06a4308_0
  - charls=2.1.0=he6710b0_2
  - cloudpickle=1.6.0=py_0
  - cudatoolkit=10.1.243=h6bb024c_0
  - cudnn=7.6.5=cuda10.1_0
  - cupti=10.1.168=0
  - cycler=0.10.0=py37_0
  - cytoolz=0.11.0=py37h7b6447c_0
  - dask-core=2021.1.0=pyhd3eb1b0_0
  - dbus=1.13.18=hb2f20db_0
  - decorator=4.4.2=py_0
  - expat=2.2.10=he6710b0_2
  - fontconfig=2.13.0=h9420a91_0
  - freetype=2.10.4=h5ab3b9f_0
  - gast=0.4.0=py_0
  - giflib=5.1.4=h14c3975_1
  - glib=2.66.1=h92f7085_0
  - google-pasta=0.2.0=py_0
  - grpcio=1.31.0=py37hf8bcb03_0
  - gst-plugins-base=1.14.0=h8213a91_2
  - gstreamer=1.14.0=h28cd5cc_2
  - h5py=2.10.0=py37hd6299e0_1
  - hdf5=1.10.6=hb1b8bf9_0
  - icu=58.2=he6710b0_3
  - imagecodecs=2020.5.30=py37hfa7d478_2
  - imageio=2.9.0=py_0
  - importlib-metadata=2.0.0=py_1
  - intel-openmp=2020.2=254
  - joblib=1.0.0=pyhd3eb1b0_0
  - jpeg=9b=h024ee3a_2
  - jxrlib=1.1=h7b6447c_2
  - keras=2.2.4=0
  - keras-applications=1.0.8=py_1
  - keras-base=2.2.4=py37_0
  - keras-preprocessing=1.1.0=py_1
  - kiwisolver=1.3.0=py37h2531618_0
  - lcms2=2.11=h396b838_0
  - ld_impl_linux-64=2.33.1=h53a641e_7
  - libaec=1.0.4=he6710b0_1
  - libedit=3.1.20191231=h14c3975_1
  - libffi=3.3=he6710b0_2
  - libgcc-ng=9.1.0=hdf63c60_0
  - libgfortran-ng=7.3.0=hdf63c60_0
  - libpng=1.6.37=hbc83047_0
  - libprotobuf=3.13.0.1=hd408876_0
  - libstdcxx-ng=9.1.0=hdf63c60_0
  - libtiff=4.1.0=h2733197_1
  - libuuid=1.0.3=h1bed415_2
  - libwebp=1.0.1=h8e7db2f_0
  - libxcb=1.14=h7b6447c_0
  - libxml2=2.9.10=hb55368b_3
  - libzopfli=1.0.3=he6710b0_0
  - lz4-c=1.9.2=heb0550a_3
  - markdown=3.3.3=py37h06a4308_0
  - matplotlib=3.3.2=h06a4308_0
  - matplotlib-base=3.3.2=py37h817c723_0
  - mkl=2020.2=256
  - mkl-service=2.3.0=py37he8ac12f_0
  - mkl_fft=1.2.0=py37h23d657b_0
  - mkl_random=1.1.1=py37h0573a6f_0
  - ncurses=6.2=he6710b0_1
  - networkx=2.5=py_0
  - numpy=1.19.2=py37h54aff64_0
  - numpy-base=1.19.2=py37hfa32c7d_0
  - olefile=0.46=py37_0
  - openjpeg=2.3.0=h05c96fa_1
  - openssl=1.1.1i=h27cfd23_0
  - pandas=1.2.0=py37ha9443f7_0
  - pcre=8.44=he6710b0_0
  - pillow=8.1.0=py37he98fc37_0
  - pip=20.3.3=py37h06a4308_0
  - protobuf=3.13.0.1=py37he6710b0_1
  - pyerfa=1.7.1.1=py37h27cfd23_1
  - pyparsing=2.4.7=pyhd3eb1b0_0
  - pyqt=5.9.2=py37h05f1152_2
  - python=3.7.9=h7579374_0
  - python-dateutil=2.8.1=py_0
  - pytz=2020.5=pyhd3eb1b0_0
  - pywavelets=1.1.1=py37h7b6447c_2
  - pyyaml=5.3.1=py37h7b6447c_1
  - qt=5.9.7=h5867ecd_1
  - readline=8.0=h7b6447c_0
  - scikit-image=0.17.2=py37hdf5156a_0
  - scikit-learn=0.23.2=py37h0573a6f_0
  - scipy=1.5.2=py37h0b6359f_0
  - setuptools=51.3.3=py37h06a4308_4
  - sip=4.19.8=py37hf484d3e_0
  - six=1.15.0=py37h06a4308_0
  - snappy=1.1.8=he6710b0_0
  - sqlite=3.33.0=h62c20be_0
  - tensorboard=1.14.0=py37hf484d3e_0
  - tensorflow=1.14.0=gpu_py37h74c33d7_0
  - tensorflow-base=1.14.0=gpu_py37he45bfe2_0
  - tensorflow-estimator=1.14.0=py_0
  - tensorflow-gpu=1.14.0=h0d30ee6_0
  - termcolor=1.1.0=py37_1
  - threadpoolctl=2.1.0=pyh5ca1d4c_0
  - tifffile=2021.1.14=pyhd3eb1b0_1
  - tk=8.6.10=hbc83047_0
  - toolz=0.11.1=pyhd3eb1b0_0
  - tornado=6.1=py37h27cfd23_0
  - tqdm=4.55.1=pyhd3eb1b0_0
  - werkzeug=1.0.1=py_0
  - wheel=0.36.2=pyhd3eb1b0_0
  - wrapt=1.12.1=py37h7b6447c_1
  - xz=5.2.5=h7b6447c_0
  - yaml=0.2.5=h7b6447c_0
  - zipp=3.4.0=pyhd3eb1b0_0
  - zlib=1.2.11=h7b6447c_3
  - zstd=1.4.5=h9ceee32_0
@@ -0,0 +1,3 @@
9062R_1_3.png,9062R_1_1.png
9052R_5_1.png,9025L_1_3.png
9015L_1_3.png,9015L_1_2.png