"""
*************************************************************************
// Example program using OpenCV library
//      python >3.7 - OpenCV 4.5
// @file	e6b.py
// @author Luis M. Jimenez
// @date 2022
//
// @brief Course: Computer Vision (1782)
// Dept. of Systems Engineering and Automation
// Automation, Robotics and Computer Vision Lab (ARVC)
// http://arvc.umh.es
// University Miguel Hernandez
//
// @note Description:
//  - Load training data Cascade Classifiers
//  - Test Detection  with camera images
//
*************************************************************************
"""

# Import libraries
import cv2 as cv
import numpy as np
import argparse


# -----------------------------------------
# Global variables
# -----------------------------------------

WINDOW_CAMERA1 = '(W1) Camera 1'   # window id
CAMERA_ID = 0                      # default camera

CASCADE_FACE_FILE = '../data/haarcascades/haarcascade_frontalface_alt.xml'
CASCADE_EYES_FILE = '../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml'

# Check command line parameters: optional camera id (-c) and the two
# cascade-classifier XML files (both positional and optional).
parser = argparse.ArgumentParser(description='OpenCV example: classification')
parser.add_argument('-c', dest='cameraID', type=int, default=CAMERA_ID, metavar='id', help='camera id')
parser.add_argument('faceFile', nargs='?', default=CASCADE_FACE_FILE,  help='Face classifier file')
parser.add_argument('eyesFile', nargs='?', default=CASCADE_EYES_FILE,  help='Eyes classifier file')

# Parse sys.argv once; the original called parse_args() three times,
# re-parsing the command line for every attribute read.
args = parser.parse_args()
CAMERA_ID = args.cameraID
CASCADE_FACE_FILE = args.faceFile
CASCADE_EYES_FILE = args.eyesFile

# -----------------------------------------
# Initialize objects: classifiers, camera, windows
# -----------------------------------------

# Load cascade trained classifiers (faces and eyes)
face_cascade = cv.CascadeClassifier(CASCADE_FACE_FILE)
eyes_cascade = cv.CascadeClassifier(CASCADE_EYES_FILE)

# HOG descriptor with OpenCV's pre-trained SVM people detector
pedestrian_detector = cv.HOGDescriptor()
pedestrian_detector.setSVMDetector(cv.HOGDescriptor.getDefaultPeopleDetector())

# CascadeClassifier() does not raise on a missing/bad XML file;
# it silently yields an empty classifier, so check explicitly.
if face_cascade.empty() or eyes_cascade.empty():
    print("Error loading Cascade Classifiers")
    exit(-1)

# Open camera object
camera = cv.VideoCapture(CAMERA_ID)
if not camera.isOpened():
    print("you need to connect a camera, sorry.")
    exit()

# Getting camera resolution
cameraWidth = int(camera.get(cv.CAP_PROP_FRAME_WIDTH))
cameraHeight = int(camera.get(cv.CAP_PROP_FRAME_HEIGHT))

# Creating visualization windows
cv.namedWindow(WINDOW_CAMERA1, cv.WINDOW_AUTOSIZE)

print(f"Capturing images from camera {CAMERA_ID} ({cameraWidth},{cameraHeight})")
print("...Hit q/Q/Esc to exit.")

# -----------------------------------------
# Main Loop
# while there are images ...
# -----------------------------------------
while True:
    # Capture frame-by-frame
    ret, capture = camera.read()

    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break

    # -----------------------------------------
    # Image processing
    # -----------------------------------------

    # Transform to gray level and normalize gray levels; both detectors
    # below work on the equalized grayscale image.
    gray_image = cv.cvtColor(capture, cv.COLOR_BGR2GRAY)
    gray_image = cv.equalizeHist(gray_image)

    # Detect faces; draw an ellipse inscribed in each detection rectangle
    faces = face_cascade.detectMultiScale(gray_image)
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        cv.ellipse(capture, center, (w // 2, h // 2), angle=0, startAngle=0, endAngle=360, color=(255, 0, 255), thickness=4)

        # Search for eyes only inside the face region (coordinates of the
        # eye detections are relative to this ROI).
        faceROI = gray_image[y:y + h, x:x + w]
        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            cv.circle(capture, eye_center, radius, color=(255, 0, 0), thickness=4)

    # Detect pedestrians (HOG + SVM). Iterate locations and their SVM
    # confidence weights together instead of indexing by range(len(...)),
    # which also avoided shadowing the builtin `id`.
    foundLocations, foundWeights = pedestrian_detector.detectMultiScale(gray_image)
    for (x, y, w, h), weight in zip(foundLocations, foundWeights):
        cv.rectangle(capture, (x, y), (x + w, y + h), color=(0, 0, 200), thickness=2)
        # Weights may come back as a size-1 ndarray row; coerce to a plain
        # float so the format spec below is always valid.
        score = float(np.ravel(weight)[0])
        text = f"{score:.2}"
        # Filled label background sized to the rendered text, above the box
        textSize, baseline = cv.getTextSize(text, cv.FONT_HERSHEY_DUPLEX, 0.3, 1)
        cv.rectangle(capture, (x, y), (x + 10 + textSize[0], y - 10 - textSize[1]), color=(0, 0, 200), thickness=cv.FILLED)
        cv.putText(capture, text, (x + 5, y - 5), cv.FONT_HERSHEY_DUPLEX, 0.3, (0, 0, 0), 1, cv.LINE_AA)

    # -----------------------------------------
    # Visualization
    # -----------------------------------------
    cv.imshow(WINDOW_CAMERA1, capture)     # Display the resulting frame

    # check keystroke to exit (image window must be on focus)
    key = cv.pollKey()
    if key == ord('q') or key == ord('Q') or key == 27:
        break

# End while (main loop)

# -----------------------------------------
# free windows and camera resources
# -----------------------------------------
cv.destroyAllWindows()
if camera.isOpened():
    camera.release()
