"""
*************************************************************************
// Example program using OpenCV library
//      python >3.7 - OpenCV 4.5
// @file	e7.py
// @author Luis M. Jimenez
// @date 2022
//
// @brief Course: Computer Vision (1782)
// Dept. of Systems Engineering and Automation
// Automation, Robotics and Computer Vision Lab (ARVC)
// http://arvc.umh.es
// University Miguel Hernandez
//
// @note Description:
//	- Shows the use of feature points detectors/descriptors
//
//	- Capture images from a camera 
//	- Detects featured points (ORB/SIFT/SURF) using common interface FeatureDetector
//	- Calculates Descriptor vector (ORB/SIFT/SURF) using common interface DescriptorExtractor
//	- Draw detected points over captured image
//
*************************************************************************
"""

# Import libraries
import cv2 as cv
import numpy as np
import argparse

# -----------------------------------------
# Global variables
# -----------------------------------------

WINDOW_CAMERA1 = '(W1) Camera 1 (Feature Detector)'   # window id
CAMERA_ID = 0                      # default camera
FEATURE_TYPE = 'sift'              # Default Feature Detector sift/surf/orb

# check command line parameters (camera id, feature, descriptor)
parser = argparse.ArgumentParser(description='OpenCV example: Feature Points Detector')
parser.add_argument('-c', dest='cameraID', type=int, default=CAMERA_ID, metavar='id', help='camera id')
parser.add_argument('-f', dest='feature', type=str, default=FEATURE_TYPE, metavar='feature', help='sift | surf | orb')

# Parse the command line exactly once; calling parse_args() per option
# re-scans sys.argv needlessly.
args = parser.parse_args()
CAMERA_ID = args.cameraID
FEATURE_TYPE = args.feature.lower()          # feature name, normalized to lowercase

# -----------------------------------------
# Put here the code to Initialize objects
# -----------------------------------------

# Open the capture device; abort early if no camera is available
camera = cv.VideoCapture(CAMERA_ID)
if not camera.isOpened():
    print("you need to connect a camera, sorry.")
    exit()

# Request a reduced capture resolution so feature extraction runs faster
for prop, value in ((cv.CAP_PROP_FRAME_WIDTH, 640),
                    (cv.CAP_PROP_FRAME_HEIGHT, 480)):
    camera.set(prop, value)

# Query the resolution the driver actually granted (may differ from the request)
cameraWidth = int(camera.get(cv.CAP_PROP_FRAME_WIDTH))
cameraHeight = int(camera.get(cv.CAP_PROP_FRAME_HEIGHT))

# Create the visualization window
cv.namedWindow(WINDOW_CAMERA1, cv.WINDOW_AUTOSIZE)

# Init FeaturePoint Detector
# Map each supported feature name to a factory callable (lazy creation).
# NOTE: 'surf' requires the opencv-contrib build (cv.xfeatures2d).
_DETECTOR_FACTORY = {
    'sift': lambda: cv.SIFT.create(nfeatures=100),
    'orb':  lambda: cv.ORB.create(nfeatures=100),
    'surf': lambda: cv.xfeatures2d.SURF.create(hessianThreshold=400),
}
if FEATURE_TYPE not in _DETECTOR_FACTORY:
    # Fall back to SIFT and update FEATURE_TYPE so the printouts and the
    # on-image label below describe the detector actually in use.
    FEATURE_TYPE = 'sift'
detector = _DETECTOR_FACTORY[FEATURE_TYPE]()


# Detector information
print(f"{FEATURE_TYPE=}")
print(f"{detector.descriptorSize()=}")

print(f"Capturing images from camera {CAMERA_ID} ({cameraWidth},{cameraHeight})")
print("...Hit q/Q/Esc to exit.")

# -----------------------------------------
# Main Loop
# while there are images ...
# -----------------------------------------
while True:
    # Capture frame-by-frame
    ret, capture = camera.read()

    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # -----------------------------------------
    # Put your image processing code here
    # -----------------------------------------
    gray_image = cv.cvtColor(capture, cv.COLOR_BGR2GRAY)

    # Detect keypoints and compute their descriptors in a single pass.
    # detectAndCompute() replaces the separate detect() + compute() calls,
    # avoiding a redundant run of the detector on every frame.
    # keypoints: tuple of cv.KeyPoint; descriptors: ndarray (one row per keypoint)
    keypoints, descriptors = detector.detectAndCompute(gray_image, None)

    # -----------------------------------------
    # Put your visualization code here
    # -----------------------------------------

    # Draw keyPoints (rich flag draws size and orientation of each keypoint)
    dispimage = cv.drawKeypoints(capture, keypoints, outImage=None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show #KeyPoints
    cv.putText(dispimage, f"Detected: {len(keypoints)} {FEATURE_TYPE.upper()} points", org=(5, 15),
               fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.4, color=(0, 0, 255), thickness=1, lineType=cv.LINE_AA)


    cv.imshow(WINDOW_CAMERA1, dispimage)     # Display the resulting frame


    # check keystroke to exit (image window must be on focus)
    key = cv.pollKey()
    if key == ord('q') or key == ord('Q') or key == 27:
        break

# End while (main loop)



# -----------------------------------------
# free windows and camera resources
# -----------------------------------------
# Tear down GUI windows first, then release the capture device if still open
cv.destroyAllWindows()
if camera.isOpened():
    camera.release()
