Using detectParking for a picture instead of video #5

Vafa-Andalibi opened this issue Mar 29, 2018 · 4 comments

@Vafa-Andalibi

Hello,

Is it possible to use detectParking on a picture of a parking lot instead of the video?
If so, could you please give me some guidance on how to do so?

Thanks,

@cnd

cnd commented Mar 30, 2018

There is a while loop with VideoCapture::read() inside that function, so the program asks for a new frame on every iteration. read() is a combination of VideoCapture::grab() and VideoCapture::retrieve(), so, for example, you can simply not call VideoCapture::grab(), or check whether the result returned by read() is true.
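Put differently, for a single picture you can drop the capture loop entirely and run the per-frame logic once on an image loaded with cv2.imread(). A rough sketch of the idea in Python (process_frame is a hypothetical stand-in for the existing per-frame detection code):

import cv2

def process_frame(frame):
    # hypothetical placeholder for the per-frame parking detection logic
    pass

# video: loop over frames delivered by VideoCapture
cap = cv2.VideoCapture('parking.mp4')
while True:
    ret, frame = cap.read()   # read() = grab() + retrieve()
    if not ret:               # stop when no frame could be read
        break
    process_frame(frame)
cap.release()

# picture: no loop, just load the image once
frame = cv2.imread('parking.jpg')
if frame is not None:
    process_frame(frame)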

@OAlramahy

I have the same issue... it would be great if someone could post the resolution.

@errolyan

Hello,

Is it possible to use detectParking on a picture of a parking lot instead of the video?
If so, could you please give me some guidance on how to do so?

Thanks,

Have you solved this problem?

@Vafa-Andalibi (Author)

This is the modified version of the Python code that I wrote based on this repo's code:

#!/usr/bin/env python

import yaml
import numpy as np
import cv2
#from matplotlib import pyplot as plt
import imageio
import time
import sys

debug = False 
assert len(sys.argv) == 4, 'Usage: sportdetector.py input_image output_image output_text'
fn_yaml = r"/path/to/yaml/spots_mapping.yml"
fn2 = sys.argv[1]
output_image = sys.argv[2]
output_text = sys.argv[3]
#print('Updating the slot.txt every 60 seconds ... ')
#while True:
config = {'save_video': False,
          'text_overlay': True,
          'parking_overlay': True,
          'parking_id_overlay': True,
          'parking_detection': True,
          'motion_detection': False,
          'pedestrian_detection': False,
          'min_area_motion_contour': 150,
          'park_laplacian_th': [1.7]*18,
          'park_sec_to_wait': 5,
          'start_frame': 0} #35000
##### manual configs:
config['park_laplacian_th'][8] = 2
config['park_laplacian_th'][10] = 2
#config['park_laplacian_th'][7] = 1.3
#config['park_laplacian_th'][2] = 3.05
#'park_laplacian_th': 3.5,
cap = cv2.imread(fn2)  # read the input image once instead of grabbing frames from a video
with open(fn_yaml, 'r') as stream:
    parking_data = yaml.load(stream, Loader=yaml.SafeLoader)  # explicit Loader avoids the PyYAML 5.1+ warning

parking_contours = []
parking_bounding_rects = []
parking_mask = []

for park in parking_data:
    points = np.array(park['points'])
    rect = cv2.boundingRect(points)
    points_shifted = points.copy()
    points_shifted[:,0] = points[:,0] - rect[0] # shift contour to roi
    points_shifted[:,1] = points[:,1] - rect[1]
    parking_contours.append(points)
    parking_bounding_rects.append(rect)
    mask = cv2.drawContours(np.zeros((rect[3], rect[2]), dtype=np.uint8), [points_shifted], contourIdx=-1,
                            color=255, thickness=-1, lineType=cv2.LINE_8)
    mask = mask==255
    parking_mask.append(mask)
#     plt.imshow(mask)

parking_status = [False]*len(parking_data)
parking_buffer = [None]*len(parking_data)

frame_blur = cv2.GaussianBlur(cap[:], (5,5), 3)
frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)
frame_out = cap.copy()  # draw overlays on a copy so the source image stays untouched

all_deltas= []

for ind, park in enumerate(parking_data):
    points = np.array(park['points'])
    rect = parking_bounding_rects[ind]
    roi_gray = frame_gray[rect[1]:(rect[1] + rect[3]), rect[0]:(rect[0] + rect[2])]  # crop roi for faster calculation
    laplacian = cv2.Laplacian(roi_gray, cv2.CV_64F)
    points[:, 0] = points[:, 0] - rect[0]  # shift contour to roi
    points[:, 1] = points[:, 1] - rect[1]
    delta = np.mean(np.abs(laplacian * parking_mask[ind]))
    if debug:
        print("index: %d , delta: %f" % (ind, delta))
    all_deltas.append(delta)
    status = delta < config['park_laplacian_th'][ind]
    parking_status[ind] = status

for ind, park in enumerate(parking_data):
    points = np.array(park['points'])
    if parking_status[ind]: color = (0,255,0)
    else: color = (255,0,0)
    cv2.drawContours(frame_out, [points], contourIdx=-1,
                     color=color, thickness=2, lineType=cv2.LINE_8)
    moments = cv2.moments(points)
    centroid = (int(moments['m10']/moments['m00'])-3, int(moments['m01']/moments['m00'])+3)
    cv2.putText(frame_out, str(park['id']), (centroid[0]+1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_out, str(park['id']), (centroid[0]-1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_out, str(park['id']), (centroid[0]+1, centroid[1]-1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_out, str(park['id']), (centroid[0]-1, centroid[1]+1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
    cv2.putText(frame_out, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1, cv2.LINE_AA)

#    plt.imshow(frame_out)
#    plt.show()
if sum(all_deltas) > 10:
    imageio.imwrite(output_image, frame_out)
    with open(output_text, 'w') as wr:
        wr.write(''.join(str(parking_data[i]['id']) + '\n'
                         for i, x in enumerate(parking_status) if x))
#    time.sleep(60)
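For reference, the script is invoked with the three positional arguments checked by the assert at the top; the file names below are placeholders:

python sportdetector.py parking_lot.jpg annotated.jpg free_spots.txt

The output image is the annotated frame, and the text file lists the ids of the spots whose status came out True, i.e. the ones the Laplacian threshold test flags as empty.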

The yaml file example is similar to the one provided in this repo.
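For completeness, a minimal sketch of the structure the script expects in spots_mapping.yml, based on the fields it reads (each entry needs an id and a points polygon); the coordinates here are made up:

- id: 0
  points: [[100, 200], [160, 200], [160, 260], [100, 260]]
- id: 1
  points: [[170, 200], [230, 200], [230, 260], [170, 260]]

There should be one entry per parking spot; the script above assumes 18 of them, matching the [1.7]*18 threshold list.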
