"""
*************************************************************************
// Example program using OpenCV library
//      python >3.7 - OpenCV 4.5
// @file	e9b.py
// @author Luis M. Jimenez
// @date 2022
//
// @brief Course: Computer Vision (1782)
// Dept. of Systems Engineering and Automation
// Automation, Robotics and Computer Vision Lab (ARVC)
// http://arvc.umh.es
// University Miguel Hernandez
//
// @note Description:
//	- Loads camera calibration data from file (JSON type)
//	- Capture images from a camera
//	- Detects markers using ARUCO library
//  - Calculates POSE from camera calibration file
//	- Draws a 3D axis on each marker over the captured image
//
*************************************************************************
"""

# Import libraries
import cv2 as cv
import numpy as np
import argparse
import json

# -----------------------------------------
# Global variables
# -----------------------------------------

WINDOW_CAMERA1 = '(W1) Camera 1'   # window id
CAMERA_ID = 0	                   # default camera
CALIB_FILE = 'camera.json'         # default camera calibration file

# check command line parameters (camera id and calibration file)
parser = argparse.ArgumentParser(description='OpenCV example: Markers detection - POSE')
parser.add_argument('-c', dest='cameraID', type=int, default=CAMERA_ID, metavar='id', help='camera id')
parser.add_argument('-cal', dest='calibFile', type=str, default=CALIB_FILE, metavar='calibFile', help='Camera Calibration File (JSON)')

# Parse the command line exactly once and reuse the result
# (the original called parser.parse_args() twice, re-parsing sys.argv)
args = parser.parse_args()
CAMERA_ID = args.cameraID
CALIB_FILE = args.calibFile

# -----------------------------------------
# Put here the code to initialize objects
# -----------------------------------------

# Init ARUCO dictionary (classic original ArUco marker set)
dictionary = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_ARUCO_ORIGINAL)

# define ARUCO detector object (default detection parameters)
detector = cv.aruco.ArucoDetector(dictionary)

# Load camera calibration data (intrinsic matrix + distortion coefficients)
# from the JSON file.  Catch only the errors this step can actually raise
# (file missing/unreadable, malformed JSON, missing keys) instead of a bare
# `except:`, so genuine bugs are not silently swallowed.
try:
    with open(CALIB_FILE) as file:
        data = json.load(file)
    cameraMatrix = np.array(data['camera_matrix'])
    distCoeffs = np.array(data['distortion_coefficients'])
except (OSError, json.JSONDecodeError, KeyError) as err:
    print(f'Camera Calibration File not valid: {CALIB_FILE} ({err})')
    exit()

print(f"{cameraMatrix=}")
print(f"{distCoeffs=}")

# Single marker geometry (used for individual marker POSE estimation).
# Corners are expressed in the marker's own frame: the origin (0,0,0) is the
# marker centre and all four corners lie in the Z=0 plane.
marker_size = 93.0    # marker side length, mm
_half_side = marker_size / 2
marker3Dpoints = np.array(
    [[-_half_side, -_half_side, 0],
     [-_half_side,  _half_side, 0],
     [ _half_side,  _half_side, 0],
     [ _half_side, -_half_side, 0]],
    dtype=np.float32,
)
    
# Acquire the capture device; bail out early if it cannot be opened
camera = cv.VideoCapture(CAMERA_ID)
if not camera.isOpened():
    print("you need to connect a camera, sorry.")
    exit()

# Query the native capture resolution (driver-reported, truncated to int)
cameraWidth, cameraHeight = (
    int(camera.get(prop))
    for prop in (cv.CAP_PROP_FRAME_WIDTH, cv.CAP_PROP_FRAME_HEIGHT)
)

# One fixed-size window for the live view
cv.namedWindow(WINDOW_CAMERA1, cv.WINDOW_AUTOSIZE)

print(f"Capturing images from camera {CAMERA_ID} ({cameraWidth},{cameraHeight})")
print("...Hit q/Q/Esc to exit.")

# -----------------------------------------
# Main Loop
# while there are images ...
# -----------------------------------------
while True:
    # Capture frame-by-frame
    ret, capture = camera.read()

    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # -----------------------------------------
    # Image processing: detect ArUco markers
    # -----------------------------------------

    gray_image = cv.cvtColor(capture, cv.COLOR_BGR2GRAY)  # transforms to gray level
    corners, ids, rejectedImgPoints = detector.detectMarkers(gray_image)
    dispimage = cv.aruco.drawDetectedMarkers(capture, corners, ids, borderColor=(0,0,255))

    # display corner order (0..3) next to each detected corner
    for item in corners:
        for i in range(4):
            cv.putText(dispimage, str(i), item[0, i].astype(int),
                       cv.FONT_HERSHEY_DUPLEX, 0.4, (0, 255, 255), 1, cv.LINE_AA)

    # Calculate POSE for each marker.
    # As of OpenCV 4.7 the aruco pose-estimation helpers were removed
    # (cv.aruco.estimatePoseSingleMarkers), so solve the PnP problem per
    # marker from its 4 corner <-> 3D point correspondences.
    rvecs, tvecs = [], []
    for item in corners:
        ok, rvec, tvec = cv.solvePnP(marker3Dpoints, item, cameraMatrix, distCoeffs,
                                     flags=cv.SOLVEPNP_ITERATIVE)
        # Only keep poses that were actually solved: the original appended
        # unconditionally, so a failed solvePnP fed garbage vectors to
        # drawFrameAxes below.
        if ok:
            rvecs.append(rvec)
            tvecs.append(tvec)

    # Draw a 3D axis (50 mm long) on each marker with a valid pose
    for rvec, tvec in zip(rvecs, tvecs):
        dispimage = cv.drawFrameAxes(dispimage, cameraMatrix, distCoeffs,
                                     rvec, tvec, length=50.0, thickness=2)

    # -----------------------------------------
    # Visualization
    # -----------------------------------------
    cv.imshow(WINDOW_CAMERA1, dispimage)     # Display the resulting frame

    # check keystroke to exit (image window must be on focus)
    key = cv.pollKey()
    if key in (ord('q'), ord('Q'), 27):
        break

# End while (main loop)


# -----------------------------------------
# free windows and camera resources
# -----------------------------------------
cv.destroyAllWindows()      # close every HighGUI window
if camera.isOpened():
    camera.release()        # hand the capture device back to the OS
