Using Python, OpenCV, etc., the video is processed by the interframe difference method, and then the video is divided and saved as a jpg file. And, while trying to automatically trim the divided images one by one, an IndexError occurred during the process.

I am a beginner, so other people create it based on the source code published online. This time I asked this question because I didn't know how to solve it.

Applicable source code
import cv2
import numpy as np
from PIL import Image
import os
import shutil
def video_2_frames (video_file, image_dir, image_file):
        # Delete the entire directory tree if it exists.
        if os.path.exists (image_dir):
            shutil.rmtree (image_dir)
        # Make the directory if it doesn't exist.
        if not os.path.exists (image_dir):
            os.makedirs (image_dir)
        # Video to frames
        global i
        global j
        global count
        i = 0
        j = 0
        count = 0
        cap = cv2.VideoCapture (video_file)
        fgbg = cv2.bgsegm.createBackgroundSubtractorMOG ()
        while (cap.isOpened ()):
            flag, frame = cap.read () # Capture frame-by-frame
            if flag == False: # Is a frame left?
            if i == 30:
                fgmask = fgbg.apply (frame)
                cv2.imwrite (image_dir + image_file% str (j), fgmask) # Save a frame
                print ('Save', image_dir + image_file% str (j))
                i = 0
                j + = 1
            i + = 1
        cap.release () # When everything done, release the capture
video_2_frames ('./ movie.mp4', './image_dir /', 'img_% s.jpg')
def nparray_to_rgb (nparry: np.array):
    return (int (nparry [0]), int (bg_color [1]), int (bg_color [2]))
def find_edge (img_path: str):
    img = cv2.imread (img_path, 0)
    blur = cv2.blur (img, (5,5))
    print ('blur')
    print (blur)
    edges = cv2.Canny (blur, 100,200)
    print ('edges')
    print (edges)
    return edges
def find_target (edges):
    results = np.where (edges == 255)
    print ('results')
    print (results)
    top = np.min (results [0])
    bottom = np.max (results [0])-1
    left = np.min (results [1])
    right = np.max (results [1])-1
    return (left, top, right, bottom)

def to_RGB (image: Image):
    if image.mode == 'RGB': return image
    background = Image.new ("RGB", image.size, (255, 255, 255))
    print ('background')
    print (background)
    background.paste (image, mask = image.split () [3]) # 3 is the alpha channel
    background.format = image.format
    return background
def get_crop_img (img_path: str):
    # im = cv2.imread ('./ hamasaki_trim/img _' + str (count) + '. png', 0)
    edges = find_edge (img_path)
    left, top, right, bottom = find_target (edges)
    rgb_img = to_RGB (Image.open (img_path))
    trim_img = rgb_img.crop ((left, top, right, bottom))
    cv2.imwrite ('./ image_dir/img _' + str (count) + '_ trim.jpg', trim_img)
    print ('crop_img')
for count_img in range (1, j):
    print ('crop _' + str (count_img))
    get_crop_img ('./ image_dir/img _' + str (count_img) + '. jpg')
    count_img + = 1
    count + = 1
Error message
line 71, in to_RGB
    background.paste (image, mask = image.split () [3]) # 3 is the alpha channel
IndexError: tuple index out of range

How can I resolve this error?
I would also appreciate if you could tell me why it is happening.

Supplemental information (FW/tool version etc.)

Development environment (IDE): Spyder (Python3.7)

  • Answer # 1


    How can I resolve this error?

    image.split () [3]

    It is assumed that the number of image channels is RGBA 4 channels, but it is probably an error because a grayscale image is passed to image and there is only 1 channel. it might be.
    Since it is speculation, try printing the value ofimage.modeto make sure it is RGBA.


    How can I resolve this error?

    You may need to add exception handling such as "Do not handle if not RGBA image".