Python 3 multiprocessing and OpenCV issue with sharing a dictionary between processes

I want to use multiprocessing to compute SIFT extraction and SIFT matching for object detection. For now, my problem is that the function returns without inserting any data into the dictionary. I am using the Manager class, and the images are opened inside the function, but it does not work. In the end, my idea is: compute the keypoints of every reference image, then use those keypoints as arguments to a second function that compares and matches them against the keypoints and descriptors of the test image.

My code is:

# %% Import Section
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime
from multiprocessing import Process, cpu_count, Manager, Lock
import argparse
# %% path section
tests_path = 'TestImages/'
references_path = 'ReferenceImages2/'
result_path = 'ResultParametrizer/'
#%% Number of processor
cpus = cpu_count()
# %% parameter section
eps = 1e-7
useTwo = False  # whether to use both the m and n keypoints; works better as False
# good match (ratio test) parameter
distance_coefficient = 0.75
# gms parameter
gms_thresholdFactor = 3
gms_withRotation = True
gms_withScale = True
# flann parameter
flann_trees = 5
flann_checks = 50

#%% Locker
lock = Lock()

# %% function definition
def keypointToDictionaries(keypoint):
    # cv2.KeyPoint objects are not picklable, so convert them to plain
    # dictionaries before sending them between processes
    x, y = keypoint.pt
    pt = float(x), float(y)
    angle = float(keypoint.angle) if keypoint.angle is not None else None
    size = float(keypoint.size) if keypoint.size is not None else None
    response = float(keypoint.response) if keypoint.response is not None else None
    class_id = int(keypoint.class_id) if keypoint.class_id is not None else None
    octave = int(keypoint.octave) if keypoint.octave is not None else None
    return {
        'pt': pt, 'angle': angle, 'size': size, 'response': response,
        'class_id': class_id, 'octave': octave
    }

def dictionariesToKeypoint(dictionary):
    # rebuild a cv2.KeyPoint from the plain dictionary produced above
    kp = cv2.KeyPoint()
    kp.pt = dictionary['pt']
    kp.angle = dictionary['angle']
    kp.size = dictionary['size']
    kp.response = dictionary['response']
    kp.octave = dictionary['octave']
    kp.class_id = dictionary['class_id']
    return kp

def rootSIFT(dictionary, image_name, image_path, eps=eps):
    # SIFT init
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    sift = cv2.xfeatures2d.SIFT_create()
    keypoints, descriptors = sift.detectAndCompute(image, None)
    # RootSIFT: L1-normalize the descriptors, then take the element-wise square root
    descriptors /= (descriptors.sum(axis=1, keepdims=True) + eps)
    descriptors = np.sqrt(descriptors)
    print('Finished computing, PID: ', os.getpid())
    lock.acquire()
    # NOTE: mutating a nested dict inside a Manager dict does not propagate back
    # to the manager, so reassign the whole entry, storing picklable values
    entry = dictionary[image_name]
    entry['keypoints'] = [keypointToDictionaries(kp) for kp in keypoints]
    entry['descriptors'] = descriptors
    dictionary[image_name] = entry
    lock.release()


def featureMatching(reference_image, reference_descriptors, reference_keypoints, test_image,
                    test_descriptors, test_keypoints, flann_trees=flann_trees, flann_checks=flann_checks):
    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=flann_trees)
    search_params = dict(checks=flann_checks)  # or pass an empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    flann_matches = flann.knnMatch(reference_descriptors, test_descriptors, k=2)
    # Lowe's ratio test: keep m only when it is clearly better than the second-best n
    matches_copy = []
    for m, n in flann_matches:
        if m.distance < distance_coefficient * n.distance:
            matches_copy.append(m)
    # matchGMS expects image sizes as (width, height), so reverse the numpy shape
    gms_matches = cv2.xfeatures2d.matchGMS(reference_image.shape[::-1], test_image.shape[::-1],
                                           keypoints1=reference_keypoints, keypoints2=test_keypoints,
                                           matches1to2=matches_copy, withRotation=gms_withRotation,
                                           withScale=gms_withScale, thresholdFactor=gms_thresholdFactor)
    return gms_matches
#%% Starting reference list file creation
reference_init = datetime.now()
print('Start reference file list creation')
reference_image_process_list = []
manager = Manager()
reference_image_dictionary = manager.dict()
reference_image_list = manager.list()
for root,directories,files in os.walk(references_path):
    for file in files:
        if file.endswith('.DS_Store'):
            continue
        reference_image_path = os.path.join(root, file)
        reference_name = file.split('.')[0]
        image = cv2.imread(reference_image_path, cv2.IMREAD_GRAYSCALE)
        reference_image_dictionary[reference_name] = {
            'image': image, 'keypoints': None, 'descriptors': None
        }
        # pass the Manager dict (not the Manager list) so rootSIFT can index by name
        proc = Process(target=rootSIFT, args=(reference_image_dictionary, reference_name, reference_image_path))
        reference_image_process_list.append(proc)
        proc.start()


for proc in reference_image_process_list:
    proc.join()


reference_end = datetime.now()
reference_time = reference_end - reference_init
print('End reference file list creation, time required: ', reference_time)
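
For reference, here is a minimal sketch of reading the shared results back in the parent process after the joins. It uses only the helpers defined above, and assumes rootSIFT stored the keypoints as plain dictionaries (see the note inside rootSIFT):

# Sketch: read the shared results back in the parent process.
for reference_name in reference_image_dictionary.keys():
    entry = reference_image_dictionary[reference_name]
    if entry['descriptors'] is None:
        print('No result for', reference_name)
        continue
    # convert the plain dictionaries back to cv2.KeyPoint objects
    keypoints = [dictionariesToKeypoint(d) for d in entry['keypoints']]
    print(reference_name, len(keypoints), 'keypoints,', entry['descriptors'].shape)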
heixiaoniao answered: Python 3 multiprocessing and OpenCV issue with sharing a dictionary between processes

I ran into almost the same error. In my case the code hangs on detectAndCompute rather than when creating the dictionary. For some reason, SIFT feature extraction is not a multiprocessing-safe operation (as far as I understand this is the case on Mac, but I am not sure).

I found this in a GitHub thread. Many people say it works, but I could not get it to work. (Edit: I tried it later and it works fine.)
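
The exact fix is not quoted above, but a commonly cited workaround for this kind of hang (an assumption on my part, not necessarily what that thread proposes) is to switch the multiprocessing start method to 'spawn', so that child processes start with a fresh interpreter instead of inheriting OpenCV's internal thread state through fork:

# Possible workaround (assumption: commonly cited for OpenCV + fork hangs)
import multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')
    # ... build the Manager and create/start the processes from here.
    # Alternatively, calling cv2.setNumThreads(0) before forking is often suggested.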

Instead, I used multithreading, which is almost the same code, and it runs well. Of course, you need to consider multithreading vs multiprocessing. A sketch of that variant follows.
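
A minimal sketch of the threading variant, reusing the rootSIFT worker and paths from the question (with threads, a plain dict is shared directly and no Manager is needed):

# Threading variant: threads share memory, so a plain dict works directly.
from threading import Thread

reference_image_dictionary = {}
threads = []
for root, directories, files in os.walk(references_path):
    for file in files:
        if file.endswith('.DS_Store'):
            continue
        reference_image_path = os.path.join(root, file)
        reference_name = file.split('.')[0]
        reference_image_dictionary[reference_name] = {
            'image': cv2.imread(reference_image_path, cv2.IMREAD_GRAYSCALE),
            'keypoints': None, 'descriptors': None
        }
        thread = Thread(target=rootSIFT, args=(reference_image_dictionary, reference_name, reference_image_path))
        threads.append(thread)
        thread.start()

for thread in threads:
    thread.join()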

