Commit 4b5017a5 authored by William Michalski

Added the Tracker object, which determines the next position to reach

parent f7d2d4bd
@@ -24,11 +24,13 @@ def centroid(img):
        cX, cY = 0, 0
    return cX, cY
img = cv2.imread("test_continuous.jpg")
img = cv2.imread("frame64.jpg")
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # HSV color space makes it easier to separate the colors
# Color thresholding parameters, adjust as needed
red_lower = np.array([136, 50, 50], np.uint8)
#red_lower = np.array([136, 50, 50], np.uint8)
#red_upper = np.array([180, 255, 255], np.uint8)
red_lower = np.array([120, 50, 50], np.uint8)
red_upper = np.array([180, 255, 255], np.uint8)
green_lower = np.array([25, 52, 72], np.uint8)
green_upper = np.array([102, 255, 255], np.uint8)
# Credit: Adrian Rosebrock
# https://www.pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python/
# import the necessary packages
from picamera.array import PiRGBArray # Generates a 3D RGB array
from picamera import PiCamera # Provides a Python interface for the RPi Camera Module
import time # Provides time-related functions
import cv2 # OpenCV library
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from math import sqrt
import serial
from Detection_elements_image import *
mask = cv2.imread("Images/mask2.png")
#############################################################
### MODEL PARAMETERS AND CONTROL-LOOP PERFORMANCE
kb = 6.1  # System model parameter
Te = 0.1  # Sampling period
alpha = 0.5  # Double zero of polynomial C(z) = (z-alpha)**2 (D(z) = C(z)*P(z))
tps_rep = 1.0  # Response time
tau = tps_rep/5.0  # Time constant tau
z1 = np.exp(-Te/tau)  # Double zero of polynomial P(z) = (z-z1)**2 (double pole of the closed loop)
### POLYNOMIAL COEFFICIENTS
K = kb*Te*Te/2.0  # Gain of the sampled system
# Coefficients of polynomial S
s0 = 1.0
s1 = 2.0 - 2.0*(alpha + z1)
s2 = (1.0/4.0)*(z1*z1+4.0*z1*alpha+alpha*alpha-1.0+6.0*(1-alpha-z1)+2.0*z1*alpha*(alpha+z1)+z1*z1*alpha*alpha)
# Coefficients of polynomial R
r0 = (1.0/K)*(-2.0*z1*alpha*(alpha+z1)-2.0*(1-alpha-z1)+3.0*s2-z1*z1*alpha*alpha)
r1 = (1.0/K)*(z1*z1*alpha*alpha-s2)
Q = (r0 + r1)/((1-alpha)*(1-alpha))
### SIMULATION PARAMETERS
nb_pas = 3  # Number of simulated steps (k)
simuX = np.zeros((3, nb_pas))  # row 0: yc, row 1: u, row 2: y
simuY = np.zeros((3, nb_pas))  # row 0: yc, row 1: u, row 2: y
#[[yc[k-2], yc[k-1], yc[k]],
# [u[k-2],  u[k-1],  u[k]],
# [y[k-2],  y[k-1],  y[k]]]
### FUNCTIONS COMPUTING THE TIME-DOMAIN SAMPLES
def u(k, simu, saturation):
    commande = ((-s1*simu[1,k-1] - s2*simu[1,k-2] + Q*simu[0,k] - 2*alpha*Q*simu[0,k-1] + Q*alpha*alpha*simu[0,k-2] - r0*simu[2,k-1] - r1*simu[2,k-2])/s0)
    if commande > saturation:
        return saturation
    elif commande < -saturation:
        return -saturation
    else:
        return commande
def y1(k, simu):
    return (2*z1*simu[2,k-1] - z1*z1*simu[2,k-2] + Q*K*(simu[0,k-1] + simu[0,k-2]))
def y2(k, simu):
    return (2*simu[2,k-1] - simu[2,k-2] + K*(simu[1,k-1] + simu[1,k-2]))
def swap(simu):
    simu[0,0] = simu[0,1]
    simu[0,1] = simu[0,2]
    simu[1,0] = simu[1,1]
    simu[1,1] = simu[1,2]
    simu[2,0] = simu[2,1]
    simu[2,1] = simu[2,2]
    return
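# For reference, a sketch of the difference equations the functions above implement,
# re-derived from the code (the notation follows the comments in this file):
#   u():  s0*u[k] = Q*(yc[k] - 2*alpha*yc[k-1] + alpha**2*yc[k-2])
#                   - s1*u[k-1] - s2*u[k-2] - r0*y[k-1] - r1*y[k-2]     (then saturated)
#   y2(): open-loop model,   y[k] = 2*y[k-1] - y[k-2] + K*(u[k-1] + u[k-2])   with K = kb*Te**2/2
#   y1(): closed-loop model, y[k] = 2*z1*y[k-1] - z1**2*y[k-2] + Q*K*(yc[k-1] + yc[k-2])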
########################################################
def centroid(img):
    # Centroid of a binary mask from image moments: cX = M10/M00, cY = M01/M00
    M = cv2.moments(img)
    if M["m00"] != 0:
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
    else:
        cX, cY = 0, 0
    return cX, cY
red_lower = np.array([136, 50, 50], np.uint8)
red_upper = np.array([180, 255, 255], np.uint8)
res = 480
rate = 40  # FPS
fichier = open("mesure_tps_par_resolution.txt", "a")
# Initialize the camera
camera = PiCamera()
# Set the camera resolution
camera.resolution = (res,res)
# Set the number of frames per second
camera.framerate = rate
# Generates a 3D RGB array and stores it in rawCapture
raw_capture = PiRGBArray(camera, size=(res,res))
# Wait a certain number of seconds to allow the camera time to warmup
time.sleep(2)
# imgCenter = np.empty((480,480,3),dtype=np.uint8)
#
# camera.capture(imgCenter,'bgr')
#
# _,_,(cX, cY) = detect_tray_center(imgCenter,mask)
#
# print(cX, cY)
cX = 232  # Hard-coded tray-center pixel coordinates (the detection above is commented out)
cY = 232
###########################################################################
ser = serial.Serial('/dev/ttyACM0', 57600, timeout=1)  # Connection to the Arduino
ser.flush()
print("Connexion avec Arduino OK")
pos_mid = 512  # Corresponds to 0 degrees of tilt
range_max = 100  # Maximum tilt in number of steps (1024 steps in total for 300 degrees)
pos_min = pos_mid - range_max
pos_max = pos_mid + range_max
pos = pos_mid
time.sleep(1)  # Allow the serial communication to synchronize
###########################################################################
sat = 2  # Command saturation passed to u()
###########################################################################
posX = 0
posY = 0
temps1 = time.time()
for frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
    # Grab the raw NumPy array representing the image
    img = frame.array
    # Uncomment to include the ball-detection time in the measurement
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # HSV color space makes it easier to separate the colors
    img_red = cv2.inRange(img_hsv, red_lower, red_upper)
    rX, rY = centroid(img_red)
    cv2.circle(img, (rX,rY), 4, (0,233,255), -1)  # Draw a dot on the original image to show where the detected center is
    # Uncomment to display the video, but keep in mind that it skews the execution speed
    cv2.imshow("Frame", img)
    # Wait for keyPress for 1 millisecond
    key = cv2.waitKey(1) & 0xFF
    # Clear the stream in preparation for the next frame
    raw_capture.truncate(0)
    # If the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    ################################################################################
    if rX != 0 and rY != 0:
        posX = rX - cX
        posY = -rY + cY
        #print("X: ", posX, "Y: ", posY)
    # Setpoint (regulate the ball towards the centre)
    simuX[0,2] = 0
    simuY[0,2] = 0
    # Joint simulation of u and y as a function of yc
    # Control input
    simuX[1,2] = u(2, simuX, sat)
    simuY[1,2] = u(2, simuY, sat)
    # Output (measured ball position)
    simuX[2,2] = posX
    simuY[2,2] = posY
    #print('yc = ', simuX[0,2], ', u = ', simuX[1,2], ' et y = ', simuX[2,2])
    swap(simuX)
    swap(simuY)
    #
    # ################################################################################
    #
    factor = 3
    # Pack the X and Y targets (10 bits each, centred on 512) into a single integer
    pos = int(512 + (simuX[2,2]/factor)) + int(512 + (simuY[2,2]/factor))*1024
    print("X: ", int(pos & 0x3FF) - 512, "Y: ", int((pos//1024) & 0x3FF) - 512)
    temps2 = time.time()
    # Enforce the sampling period before sending the next command
    while temps2 - temps1 < Te*3:
        time.sleep(0.01)
        temps2 = time.time()
        #print(temps2-temps1)
    temps1 = time.time()
    ser.write((str(pos)+'\0').encode('utf-8'))
class Tracker(object):
    EPS = 5.0  # Neighborhood limit (compared against the squared distance)
    MAX = 1000  # Squared-distance limit before the ball is considered lost
    def __init__(self, path):
        self.path = path  # path is a list of positions (tuples)
        self.index = 0  # Index of the next point of the path to reach
        self.state = 0  # Three states, 0, 1 and -1, for "seeking", "arrived", "lost"
    def increment_index(self):
        if self.index < len(self.path) - 1:
            self.index += 1
        else:
            self.state = 1  # If index already points to the last point, switch to the "arrived" state
    def in_neighbours(self, X):
        x, y = X
        xO, yO = self.path[self.index]
        dist = (x - xO)**2 + (y - yO)**2  # squared distance to the current target
        if dist > self.MAX:
            self.state = -1  # If we are too far from the target, switch to the "lost" state
        return dist < self.EPS
    def get_next_position(self, X):
        if self.in_neighbours(X):  # If we have reached the target's neighborhood, move on to the next target
            self.increment_index()
        return self.path[self.index]
    def is_arrived(self):
        return self.state == 1
    def is_lost(self):
        return self.state == -1
## Typical usage:
#
# tracker = Tracker(path)  # Initialize the tracker
# for each iteration:
#     if tracker.is_arrived():
#         print("We have arrived!")
#         break
#     img = capturer_une_frame()
#     X = obtenir_position_bille()
#     commande = tracker.get_next_position(X)
#     if tracker.is_lost():
#         print("We have to recompute the shortest path!")
#         recommencer()
#     asservissement(commande)
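## A concrete (commented-out) sketch of the same loop with a toy path, for reference.
## The path and ball position below are illustrative only and are not part of the project:
#
# path = [(10, 10), (20, 10), (20, 20)]
# tracker = Tracker(path)
# ball = (12, 14)                             # measured ball position (toy value)
# target = tracker.get_next_position(ball)    # still (10, 10): not yet inside the EPS neighborhood
# print(target, tracker.is_arrived(), tracker.is_lost())   # (10, 10) False False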
\ No newline at end of file
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 15:53:11 2021
@author: william
"""
class Vertex(object):
    def __init__(self, x, y, obstacle):
        self.x = x
        self.y = y
        self.obstacle = obstacle
        self.cost = float('inf')  # current distance from the source node
        self.px = None
        self.py = None
        self.belonging = 0  # 0, 1, 2 for out of list, in openList, in closedList
    def __eq__(self, other):
        return (self.x == other.x) and (self.y == other.y)
    def _get_cost(self):
        return self.cost
    def dist(self, other):
        # Squared Euclidean distance to another vertex
        return (other.x - self.x)**2 + (other.y - self.y)**2
    def parenting(self, other):
        self.px = other.x
        self.py = other.y
    def add_openList(self, openL):
        openL.append(self)
        self.belonging = 1
    def in_openList(self):
        return self.belonging == 1
    def add_closedList(self, openL):
        self.belonging = 2  # The closedList is virtual: it does not need to exist, we only need to know whether a vertex belongs to it
    def in_closedList(self):
        return self.belonging == 2
    def is_obstacle(self):
        return self.obstacle == 0
    def __str__(self):
        if self.in_closedList():
            str_belonging = "closedList"
        elif self.in_openList():
            str_belonging = "openList"
        else:
            str_belonging = "no list"
        if self.is_obstacle():
            str_obstacle = "yes"
        else:
            str_obstacle = "no"
        return ("(x,y) = (" + str(self.x) + "," + str(self.y) + ")\n"
                "(Px,Py) = (" + str(self.px) + "," + str(self.py) + ")\n"
                "Cost = " + str(self.cost) + "\n"
                "Obstacle : " + str_obstacle + "\n"
                "Belongs to : " + str_belonging + "\n"
                )
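## For context, a minimal sketch of how Vertex is intended to be used in an
## A*/Dijkstra-style relaxation step. The helper below is illustrative only and
## is not part of this commit (the actual search lives in solve_maze, in another file):
#
# def relax(current, neighbour, openL):
#     new_cost = current.cost + current.dist(neighbour)
#     if not neighbour.is_obstacle() and new_cost < neighbour.cost:
#         neighbour.cost = new_cost
#         neighbour.parenting(current)          # remember where we came from
#         if not neighbour.in_openList():
#             neighbour.add_openList(openL)     # schedule the neighbour for exploration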
@@ -51,17 +51,17 @@ def solve_maze(img, ball_position, finish_position):
    return graph.path
#%% Test
#import matplotlib.pyplot as plt
#
#img = cv2.imread("Images/testLaby_bin.jpg")
#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#l,c = img.shape
#img = cv2.resize(img,(c//20,l//20))
#ball_position, finish_position = (70,50), (17,50)
#path = solve_maze(img, ball_position, finish_position)
#
#for i in range(len(path)):
# img[path[i].x, path[i].y] = 150
# #print(str(path[i].x) + ";" + str(path[i].y))
#
#plt.figure(1), plt.imshow(img)
import matplotlib.pyplot as plt
img = cv2.imread("Images/testLaby_bin.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
l,c = img.shape
img = cv2.resize(img,(c//20,l//20))
ball_position, finish_position = (70,50), (17,50)
path = solve_maze(img, ball_position, finish_position)
for i in range(len(path)):
    img[path[i].x, path[i].y] = 150
    #print(str(path[i].x) + ";" + str(path[i].y))
plt.figure(1), plt.imshow(img)