set cut_paste_input [stack 0]
version 14.1 v2
push $cut_paste_input
NoOp {
 name Auto_Frame_Detect3
 tile_color 0xffff00
 selected true
 xpos 8593
 ypos 23228
 addUserKnob {20 Auto_Frame_Detect_Tab l "Auto Frame Detect"}
 addUserKnob {26 info l "" +STARTLINE T "It can be slow when you connect .EXR\nway faster when it opens with .mov"}
 addUserKnob {26 ""}
 addUserKnob {3 count l "Framehold Count" t "How many frames will be detected"}
 count 2
 addUserKnob {7 threshold l Sensitivity R 0 100}
 threshold 10
 addUserKnob {26 ""}
 addUserKnob {22 Run_Auto_Frame_Detect l "Auto Frame Detect" T "import cv2\nimport numpy as np\nimport nuke\nimport OpenEXR\nimport Imath\nimport os\n\n# Load every .exr in a directory as a small, normalized 8-bit preview frame.\ndef load_exr_frames(exr_directory):\n    exr_files = sorted(\[f for f in os.listdir(exr_directory) if f.endswith('.exr')])\n    frames = \[]\n    for file_name in exr_files:\n        file_path = os.path.join(exr_directory, file_name)\n        exr_file = OpenEXR.InputFile(file_path)\n        dw = exr_file.header()\['dataWindow']\n        size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)\n        pt = Imath.PixelType(Imath.PixelType.FLOAT)\n        rgb = \[np.frombuffer(exr_file.channel(c, pt), dtype=np.float32) for c in ('R', 'G', 'B')]\n        img_array = np.dstack(rgb).reshape((size\[1], size\[0], 3))\n        # Downscale 16x so the per-frame diff stays fast, then map to 8-bit for OpenCV.\n        rescaled_img_array = cv2.resize(img_array, (size\[0] // 16, size\[1] // 16), interpolation=cv2.INTER_AREA)\n        normalized_img_array = cv2.normalize(rescaled_img_array, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)\n        frames.append(normalized_img_array)\n    return frames\n\n# Determine the file type and load frames\nn = nuke.selectedNode()\ninput_node = n.input(0)\nif input_node and input_node.Class() == 'Read':\n    # Only read the frame range once we know a Read node is actually connected,\n    # otherwise a disconnected input would raise before the friendly error below.\n    first_frame = int(input_node\['first'].value())\n    last_frame = int(input_node\['last'].value())\n    file_path = input_node\['file'].getValue()\n    file_extension = os.path.splitext(file_path)\[-1].lower()\n    if file_extension in \['.mov', '.mp4']:\n        cap = cv2.VideoCapture(file_path)\n        frames = \[cap.read()\[1] for _ in range(last_frame)]\n        cap.release()\n    elif file_extension == '.exr':\n        exr_directory = os.path.dirname(file_path)\n        frames = load_exr_frames(exr_directory)\n    else:\n        raise Exception(\"Unsupported file format\")\nelse:\n    raise Exception(\"No Read node connected or selected node is not a Read node\")\n\nif not frames:\n    raise Exception(\"Failed to load frames from the file\")\n\n# Assume the maximum threshold value is 100\nmax_threshold_value = 100\n\n# Get the user input from the knob\nuser_input_threshold = int(nuke.thisNode().knob('threshold').value())\n\n# Higher Sensitivity means a lower pixel-difference threshold\nthreshold = max_threshold_value - user_input_threshold\n\n\n# Motion detection and frame processing\n\nprevious_frame = None\n\ndef detect_motion(previous_frame, current_frame, threshold):\n    # Calculate the frame difference\n    frame_diff = cv2.absdiff(previous_frame, current_frame)\n    # Threshold the difference image to get a binary image\n    _, thresholded_diff = cv2.threshold(frame_diff, threshold, 255, cv2.THRESH_BINARY)\n    # Ensure the binary image is in 8-bit format, required for findContours\n    thresholded_diff = thresholded_diff.astype(np.uint8)\n    # Morphological open with a 5x5 rectangle removes small noise blobs\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n    opened_diff = cv2.morphologyEx(thresholded_diff, cv2.MORPH_OPEN, kernel)\n    # Bounding rectangles of the remaining motion contours\n    contours, _ = cv2.findContours(opened_diff, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n    return \[cv2.boundingRect(contour) for contour in contours]\n\ndef process_frame(frame, frame_number):\n    # Annotate the frame and return it together with the motion regions found\n    # against the previous frame (empty list for the very first frame).\n    # Returning the regions avoids re-running detect_motion after\n    # previous_frame has already been advanced, which would always diff a\n    # frame against itself and score every frame as zero motion.\n    global previous_frame\n    current_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    if previous_frame is None:\n        previous_frame = current_frame_gray\n        return frame, \[]\n    motion_regions = detect_motion(previous_frame, current_frame_gray, threshold)\n    for x, y, w, h in motion_regions:\n        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n    cv2.putText(frame, f\"Frame: \{int(frame_number)\}\", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n    previous_frame = current_frame_gray\n    return frame, motion_regions\n\n# Score each frame by the total area of its motion regions\nframe_sums = \[]\nfor frame_number, frame in enumerate(frames):\n    if frame is None:\n        print(\"Error reading frame\")\n        break\n    processed_frame, motion_regions = process_frame(frame, frame_number)\n    frame_sum = sum(w * h for _, _, w, h in motion_regions)\n    frame_sums.append(frame_sum)\n    cv2.imshow('Processed Frame', processed_frame)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\ncv2.destroyAllWindows()\n\n# Create FrameHold nodes for the frames with the most motion\ncount = int(nuke.thisNode().knob('count').value())\nread_node_x = n.xpos()\nread_node_y = n.ypos()\nindices = np.argsort(frame_sums)\[-count:]\[::-1] # Get indices of frames with the most motion\n\nfor i, frame_index in enumerate(indices):\n    actual_frame_number = frame_index + first_frame # Adjust index by adding first_frame\n    frame_sum = frame_sums\[frame_index]\n    framehold_node = nuke.createNode('FrameHold')\n    framehold_node\['first_frame'].setValue(actual_frame_number) # Set to actual frame number\n    framehold_node\['label'].setValue(f\"Sum: \{frame_sum\}\")\n    # Lay the FrameHolds out in a centered row 150px below the NoOp\n    node_x = read_node_x - ((count - 1) * 100 // 2) + 100 * i\n    node_y = read_node_y + 150\n    framehold_node.setXpos(node_x)\n    framehold_node.setYpos(node_y)\n    framehold_node.setInput(0, n)" +STARTLINE}
}