Skip to content

Commit

Permalink
Merge branch 'main' of github.com:97hackbrian/embedded-labs
Browse files Browse the repository at this point in the history
  • Loading branch information
97hackbrian committed Nov 16, 2023
2 parents 6c9fdfc + f15b881 commit 7e2eba2
Show file tree
Hide file tree
Showing 7 changed files with 956 additions and 0 deletions.
29 changes: 29 additions & 0 deletions Raspberry/Lab9/ej1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
"""Background-subtraction demo: play a clip and display its foreground mask."""
import cv2

# Open the bouncing-ball demo clip shipped with the lab resources.
video = cv2.VideoCapture('Raspberry/Lab9/recursos_lab_9/bouncing.mp4')

# Alternative background-subtraction algorithms tried during the lab
# (KNN gave the best result, so it is the one left enabled):
# subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
# subtractor = cv2.createBackgroundSubtractorMOG2()
# subtractor = cv2.bgsegm.createBackgroundSubtractorGMG()
bg_model = cv2.createBackgroundSubtractorKNN()

while True:
    grabbed, image = video.read()
    if not grabbed:
        # End of the clip (or read failure): stop the playback loop.
        break

    # Feed the current frame to the model; it returns the foreground mask.
    mask = bg_model.apply(image)

    # Show the mask so motion shows up as white blobs.
    cv2.imshow('Foreground Mask', mask)

    # Esc (key code 27) quits early; ~30 ms delay paces the playback.
    if cv2.waitKey(30) & 0xFF == 27:
        break

video.release()
cv2.destroyAllWindows()
53 changes: 53 additions & 0 deletions Raspberry/Lab9/ej2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
"""Webcam motion detector: label moving objects that cross the frame centre."""
import cv2
import numpy as np

# Requested capture geometry and rate (the driver may silently clamp these).
FRAME_WIDTH = 640
FRAME_HEIGHT = 480
FRAME_RATE = 30

camera = cv2.VideoCapture(0)  # 0 selects the default webcam
camera.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)
camera.set(cv2.CAP_PROP_FPS, FRAME_RATE)

# MOG2 background model separates moving foreground from the static scene.
bg_model = cv2.createBackgroundSubtractorMOG2()

while True:
    grabbed, image = camera.read()
    if not grabbed:
        break

    # Blur first so sensor noise does not register as motion.
    image = cv2.GaussianBlur(image, (19, 19), 0)
    motion_mask = bg_model.apply(image)

    # Binarise the mask; the 128 cutoff drops MOG2's grey "shadow" pixels.
    _, binary = cv2.threshold(motion_mask, 128, 255, cv2.THRESH_BINARY)

    # Outer contour of each moving blob.
    blobs, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    centre_x = image.shape[1] // 2
    centre_y = image.shape[0] // 2
    for blob in blobs:
        x, y, w, h = cv2.boundingRect(blob)

        # Label the frame when this blob's box straddles the centre point.
        if x < centre_x < x + w and y < centre_y < y + h:
            cv2.putText(image, "Object Detected",
                        (image.shape[1] - 200, image.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Outline every detected blob in green.
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('Object Detection', image)

    # Esc (key code 27) quits the capture loop.
    if cv2.waitKey(30) & 0xFF == 27:
        break

# Release the webcam and tear down all OpenCV windows.
camera.release()
cv2.destroyAllWindows()
25 changes: 25 additions & 0 deletions Raspberry/Lab9/ej3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
"""Contour overlay: draw Canny-derived contours on each frame of a clip."""
from pixie import img, showIMG, video, videosPlays
import cv2

# Frames with contour overlays, accumulated for playback at the end.
processed_frames = []

if __name__ == "__main__":
    clip = video()
    clip.load("Raspberry/Lab9/recursos_lab_9/bouncing.mp4")

    source_frames = clip.retorno()

    for frame in source_frames:
        # Canny edge map computed on the grayscale version of the frame.
        grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(grayscale, 80, 240)

        # Trace every edge contour and paint it in red on the original frame.
        outlines, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(frame, outlines, -1, (0, 0, 255), 1)

        processed_frames.append(frame)

    # Play the annotated sequence with the lab's pixie helper.
    videosPlays(clip, [processed_frames])
40 changes: 40 additions & 0 deletions Raspberry/Lab9/ej4.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
"""Coin counter: adaptive threshold + Hough circles, numbered on the image."""
import cv2
import numpy as np

# Colour copy for drawing results, grayscale copy for detection.
display = cv2.imread('/home/hackbrian/Documentos/gitProyects/embedded-labs/Raspberry/Lab9/recursos_lab_9/monedas_2.jpg')
gray = cv2.imread('/home/hackbrian/Documentos/gitProyects/embedded-labs/Raspberry/Lab9/recursos_lab_9/monedas_2.jpg', cv2.IMREAD_GRAYSCALE)
gray = cv2.resize(gray, (600, 400))
display = cv2.resize(display, (600, 400))

# Inverted Gaussian adaptive threshold makes the coin edges stand out.
gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY_INV, 11, 1)

# Hough transform tuned for coins of radius ~65-70 px at this 600x400 scale;
# minDist=115 keeps overlapping detections of the same coin apart.
detections = cv2.HoughCircles(
    gray,
    cv2.HOUGH_GRADIENT, dp=1, minDist=115,
    param1=100, param2=15, minRadius=65, maxRadius=70
)

coin_number = 0
# Draw and number each detected circle (if any were found).
if detections is not None:
    for cx, cy, radius in np.uint16(np.around(detections))[0, :]:
        coin_number += 1
        # Green outline around the coin.
        cv2.circle(display, (cx, cy), radius, (0, 255, 0), 2)
        # Red dot on the centre.
        cv2.circle(display, (cx, cy), 2, (0, 0, 255), 3)
        # Running count printed at the centre.
        cv2.putText(display, str(coin_number), (cx, cy),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.4, (255, 110, 100), 3)

# Show the annotated image until any key is pressed.
cv2.imshow('Circulos Detectados', display)
cv2.waitKey(0)
cv2.destroyAllWindows()
96 changes: 96 additions & 0 deletions Raspberry/Lab9/ej7.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
"""Tankbot colour follower: steer the tank toward a red target seen by the
camera, blinking the LEDs and stopping when a large/close blob is detected."""
import cv2
import sys
from time import sleep
import numpy as np
sys.path.append('/root/Desktop/embedded-labs/tankbot')
from libs.tracker import *
from libs.tiva import *

# Serial link to the Tiva board plus the motor/LED helpers layered on it.
tiva1 = InitSerial(baud=9600)
motors = Motors(serial_instance=tiva1)
Leds = LedControl(serial_instance=tiva1)
Leds.init_system(cam=0)  # Repair cam=1

# Create tracker object (instantiated for the lab; not used in the loop below).
tracker = EuclideanDistTracker()

cap = cv2.VideoCapture(0)

# Object detection from stable camera
while True:
    ret, frame = cap.read()
    # BUGFIX: test `ret` BEFORE touching `frame` — on a failed grab `frame`
    # is None, and the original sliced it first, raising TypeError instead
    # of leaving the loop cleanly.
    if not ret:
        break
    # Keep only the lower band of the image (rows 300+), where the target
    # appears at driving distance.
    frame = frame[300:, :]

    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # HSV bounds for the colours to detect.
    # NOTE(review): blue and green ranges are zero-width (lower == upper == 0),
    # so their masks are always empty — only red is actually detected. Kept
    # as-is; presumably placeholders to be tuned later.
    lower_blue = np.array([0, 0, 0])
    upper_blue = np.array([0, 0, 0])
    lower_green = np.array([0, 0, 0])
    upper_green = np.array([0, 0, 0])
    # Red wraps around 0/180 in HSV, so it needs two ranges.
    lower_red1 = np.array([0, 50, 50])
    upper_red1 = np.array([10, 150, 255])
    lower_red2 = np.array([160, 50, 50])
    upper_red2 = np.array([180, 150, 255])

    # Per-colour binary masks.
    mask_blue = cv2.inRange(frame_hsv, lower_blue, upper_blue)
    mask_green = cv2.inRange(frame_hsv, lower_green, upper_green)
    mask_red1 = cv2.inRange(frame_hsv, lower_red1, upper_red1)
    mask_red2 = cv2.inRange(frame_hsv, lower_red2, upper_red2)

    # Combine the red masks to handle the wrap-around in the hue space.
    mask_red = mask_red1 + mask_red2

    # Combine all masks into a single detection mask.
    mask = mask_blue + mask_green + mask_red

    # Outer contours of every detected colour blob.
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        # Area filters out small noise blobs below.
        area = cv2.contourArea(cnt)
        # Any real contour (>= 2 points): blink LEDs and stop the motors.
        # NOTE(review): this fires for essentially every contour, and the
        # same blink/stop repeats inside the area > 1000 branch — the timing
        # (four 0.5 s sleeps for large blobs) is preserved as written.
        if len(cnt) >= 2:
            Leds.write(1, 1, 0, 0)
            sleep(0.5)
            Leds.write(0, 0, 1, 1)
            sleep(0.5)
            motors.stop()

        if area > 1000:
            if len(cnt) >= 2:
                Leds.write(1, 1, 0, 0)
                sleep(0.5)
                Leds.write(0, 0, 1, 1)
                sleep(0.5)
                motors.stop()

            else:
                # Steer toward the blob centre: right / left / straight.
                x, y, w, h = cv2.boundingRect(cnt)
                center_x = x + w // 2

                if center_x > 420:
                    print("derecha")
                    motors.move(70, -70)
                elif center_x < 210:
                    print("izquierda")
                    motors.move(-84, 84)
                else:
                    print("centro")
                    motors.stop()
    # Nothing detected at all: spin in place to search for the target.
    if len(contours) == 0:
        motors.move(-70, 75)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)

    # Esc (key code 27) stops the motors and exits.
    key = cv2.waitKey(30)
    if key == 27:
        motors.stop()
        break

cap.release()
cv2.destroyAllWindows()
Loading

0 comments on commit 7e2eba2

Please sign in to comment.