Fundamental matrix estimation

This example demonstrates how to robustly estimate the epipolar geometry (the geometry of stereo vision) between two views using sparse ORB feature correspondences.

The fundamental matrix relates corresponding points between a pair of uncalibrated images. The matrix transforms homogeneous image points in one image to epipolar lines in the other image.
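
In symbols (standard epipolar geometry, added here for reference rather than taken from the example itself): if x and x′ are corresponding image points in homogeneous coordinates, the fundamental matrix F satisfies

    x′ᵀ F x = 0,

and l′ = F x is the epipolar line in the second image on which x′ must lie.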

Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew, principal point) of the two cameras is not known. The fundamental matrix therefore enables projective 3D reconstruction of the captured scene. If the calibration is known, estimating the essential matrix instead enables metric 3D reconstruction of the captured scene.

[Figure: Inlier correspondences; Histogram of disparity errors]
Number of matches: 223
Number of inliers: 162
FutureWarning: `plot_matches` is deprecated since version 0.23 and will be removed in version 0.25. Use `skimage.feature.plot_matched_features` instead.

import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt

img_left, img_right, groundtruth_disp = data.stereo_motorcycle()
img_left, img_right = map(rgb2gray, (img_left, img_right))

# Find sparse feature correspondences between left and right image.
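# ORB detects oriented FAST keypoints and computes binary descriptors; match_descriptors
# pairs them by Hamming distance, and cross_check=True keeps only mutual
# nearest-neighbour matches.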

descriptor_extractor = ORB()

descriptor_extractor.detect_and_extract(img_left)
keypoints_left = descriptor_extractor.keypoints
descriptors_left = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img_right)
keypoints_right = descriptor_extractor.keypoints
descriptors_right = descriptor_extractor.descriptors

matches = match_descriptors(descriptors_left, descriptors_right, cross_check=True)

print(f'Number of matches: {matches.shape[0]}')

# Estimate the epipolar geometry between the left and right image.
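# RANSAC repeatedly fits a FundamentalMatrixTransform to random minimal subsets of the
# matches (min_samples=8, i.e. the classic eight-point algorithm) and keeps the model
# with the most inliers; residual_threshold is the largest residual (Sampson distance,
# in pixels) a correspondence may have while still counting as an inlier.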
random_seed = 9
rng = np.random.default_rng(random_seed)

model, inliers = ransac(
    (keypoints_left[matches[:, 0]], keypoints_right[matches[:, 1]]),
    FundamentalMatrixTransform,
    min_samples=8,
    residual_threshold=1,
    max_trials=5000,
    rng=rng,
)

inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

print(f'Number of inliers: {inliers.sum()}')

# Compare estimated sparse disparities to the dense ground-truth disparities.
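# Keypoints are stored as (row, col). The stereo pair is rectified, so the column
# difference between matching keypoints approximates the horizontal disparity, which is
# compared against the ground-truth disparity map sampled at the same pixel locations.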

disp = inlier_keypoints_left[:, 1] - inlier_keypoints_right[:, 1]
disp_coords = np.round(inlier_keypoints_left).astype(np.int64)
disp_idxs = np.ravel_multi_index(disp_coords.T, groundtruth_disp.shape)
disp_error = np.abs(groundtruth_disp.ravel()[disp_idxs] - disp)
disp_error = disp_error[np.isfinite(disp_error)]

# Visualize the results.
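# Note: plot_matches is deprecated since scikit-image 0.23 (see the FutureWarning in the
# output above); newer releases provide skimage.feature.plot_matched_features instead.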

fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

plot_matches(
    ax[0],
    img_left,
    img_right,
    keypoints_left,
    keypoints_right,
    matches[inliers],
    only_matches=True,
)
ax[0].axis("off")
ax[0].set_title("Inlier correspondences")

ax[1].hist(disp_error)
ax[1].set_title("Histogram of disparity errors")

plt.show()
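
As noted above, when the intrinsic calibration is known, the essential matrix can be estimated instead and used for metric reconstruction. The following is a minimal sketch (not part of the original example) of how that could look with scikit-image's EssentialMatrixTransform. The intrinsic matrix K below is a made-up placeholder, and the keypoints_left, keypoints_right, matches and rng variables are reused from the script above.

import numpy as np
from skimage.measure import ransac
from skimage.transform import EssentialMatrixTransform

# Hypothetical intrinsic matrix (focal lengths and principal point); real values would
# come from a camera calibration, not from this example.
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])


def to_normalized_coords(keypoints_rc, K):
    # Keypoints are (row, col); flip to (x, y), append 1, and apply K^-1 to obtain
    # normalized camera coordinates.
    points_xy = keypoints_rc[:, ::-1]
    points_h = np.column_stack([points_xy, np.ones(len(points_xy))])
    return (np.linalg.inv(K) @ points_h.T).T[:, :2]


left_norm = to_normalized_coords(keypoints_left[matches[:, 0]], K)
right_norm = to_normalized_coords(keypoints_right[matches[:, 1]], K)

# Same RANSAC pattern as above, but with the essential-matrix model and a threshold
# expressed in normalized (dimensionless) coordinates.
model_essential, inliers_essential = ransac(
    (left_norm, right_norm),
    EssentialMatrixTransform,
    min_samples=8,
    residual_threshold=1e-3,
    max_trials=5000,
    rng=rng,
)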

Total running time of the script: (0 minutes 2.140 seconds)

Gallery generated by Sphinx-Gallery