
Multiple View Geometry -- Camera Calibration -- Camera Calibration with DLT

Camera Calibration with the DLT (Direct Linear Transform) Analytical Method

This DLT-based camera calibration was part of my graduate work. While revisiting multiple view geometry I decided to summarize it again; for a more detailed treatment of the underlying theory, see 《近景摄影测量学》 (Close-Range Photogrammetry).

1. Imaging Model and Projection Equation

The DLT is based on a linearized form of the collinearity equations and assumes an ideal pinhole camera (lens distortion ignored). A point $(X_w, Y_w, Z_w)$ in the world coordinate system projects to image coordinates $(u, v)$ according to:
$$
s \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} =
\begin{bmatrix} l_1 & l_2 & l_3 & l_4 \\ l_5 & l_6 & l_7 & l_8 \\ l_9 & l_{10} & l_{11} & l_{12} \end{bmatrix}
\begin{bmatrix} X_w \\ Y_w \\ Z_w \\ 1 \end{bmatrix}
$$

where $s$ is a scale factor and $L = [l_i]$ is the projection matrix. Dividing through by $l_{12}$ (normalization) gives:
$$
\begin{bmatrix} u \\ v \\ 1 \end{bmatrix} =
\begin{bmatrix} l_1 & l_2 & l_3 & l_4 \\ l_5 & l_6 & l_7 & l_8 \\ l_9 & l_{10} & l_{11} & 1 \end{bmatrix}
\begin{bmatrix} X_w \\ Y_w \\ Z_w \\ 1 \end{bmatrix}
$$
In the DLT the matrix therefore has 11 degrees of freedom, i.e. 11 unknowns to be determined.
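As a quick sanity check of this model, the minimal NumPy sketch below projects a single world point with the normalized equations ($l_{12}=1$); the coefficient values are made up purely for illustration:

import numpy as np

# Hypothetical DLT coefficients l1..l11 (l12 is fixed to 1 after normalization)
l = np.array([800.0, 0.0, 320.0, 100.0,
              0.0, 800.0, 240.0, 50.0,
              0.0, 0.0, 1.0])

def project_dlt(l, Xw, Yw, Zw):
    # Normalized DLT projection: u, v from the 11 coefficients
    denom = l[8]*Xw + l[9]*Yw + l[10]*Zw + 1.0
    u = (l[0]*Xw + l[1]*Yw + l[2]*Zw + l[3]) / denom
    v = (l[4]*Xw + l[5]*Yw + l[6]*Zw + l[7]) / denom
    return u, v

print(project_dlt(l, 1.0, 2.0, 10.0))   # -> projected image coordinates (u, v)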

2. Building the Linear System

After eliminating the scale factor $s$, each 3D-2D point correspondence provides two equations:
$$
\begin{cases}
u = \dfrac{l_1 X_w + l_2 Y_w + l_3 Z_w + l_4}{l_9 X_w + l_{10} Y_w + l_{11} Z_w + 1} \\[2.5ex]
v = \dfrac{l_5 X_w + l_6 Y_w + l_7 Z_w + l_8}{l_9 X_w + l_{10} Y_w + l_{11} Z_w + 1}
\end{cases}
$$

Expanding into homogeneous form yields the linear constraints:
$$
\begin{bmatrix}
X_w & Y_w & Z_w & 1 & 0 & 0 & 0 & 0 & -uX_w & -uY_w & -uZ_w & -u \\
0 & 0 & 0 & 0 & X_w & Y_w & Z_w & 1 & -vX_w & -vY_w & -vZ_w & -v
\end{bmatrix} \cdot \mathbf{l} = \mathbf{0}
$$

where $\mathbf{l} = [l_1, l_2, \dots, l_{11}, 1]^T$. Each point contributes two rows, so at least 6 control points (12 equations) are required to solve for the 11 independent parameters (the projection matrix has 11 degrees of freedom).
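In code, the homogeneous system can be stacked and solved with an SVD: the right singular vector belonging to the smallest singular value is the (least-squares) null vector of the coefficient matrix, which is then rescaled so that $l_{12}=1$. This is only a sketch of the linear step (the full pipeline, with coordinate normalization and refinement, is in section 5); the point lists are placeholders:

import numpy as np

def solve_dlt(world_pts, image_pts):
    # Two rows per 3D-2D correspondence, 12 columns (homogeneous form A . l = 0)
    rows = []
    for (X, Y, Z), (u, v) in zip(world_pts, image_pts):
        rows.append([X, Y, Z, 1, 0, 0, 0, 0, -u*X, -u*Y, -u*Z, -u])
        rows.append([0, 0, 0, 0, X, Y, Z, 1, -v*X, -v*Y, -v*Z, -v])
    A = np.asarray(rows, dtype=float)
    _, _, Vt = np.linalg.svd(A)
    l = Vt[-1]                # null vector, defined up to scale
    return l[:11] / l[11]     # enforce l12 = 1 and return l1..l11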

3. Solving for Initial Values of the L Matrix

Using 5.5 control points (11 equations), the L matrix can be solved for directly, and the principal point coordinates follow as:
$$
u_0 = -\frac{l_1 l_9 + l_2 l_{10} + l_3 l_{11}}{l_9^2 + l_{10}^2 + l_{11}^2}, \qquad
v_0 = -\frac{l_5 l_9 + l_6 l_{10} + l_7 l_{11}}{l_9^2 + l_{10}^2 + l_{11}^2}
$$
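Once the $l$ vector is available, the principal point is a direct evaluation of these formulas; a small sketch (0-based indexing, so l[8:11] holds $l_9, l_{10}, l_{11}$):

def principal_point(l):
    # u0, v0 from the DLT coefficients
    d = l[8]**2 + l[9]**2 + l[10]**2
    u0 = -(l[0]*l[8] + l[1]*l[9] + l[2]*l[10]) / d
    v0 = -(l[4]*l[8] + l[5]*l[9] + l[6]*l[10]) / d
    return u0, v0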

4. Iterative Solution of the Intrinsic/Extrinsic Parameters and Distortion Coefficients

Adding distortion terms to the direct linear transformation equations:
$$
\begin{cases}
u = \dfrac{l_1 X_w + l_2 Y_w + l_3 Z_w + l_4}{l_9 X_w + l_{10} Y_w + l_{11} Z_w + 1} + \Delta u \\[2.5ex]
v = \dfrac{l_5 X_w + l_6 Y_w + l_7 Z_w + l_8}{l_9 X_w + l_{10} Y_w + l_{11} Z_w + 1} + \Delta v
\end{cases}
$$

$$
\left.\begin{aligned}
\Delta v &= (v - v_0)\left(k_1 r^2 + k_2 r^4 + \cdots\right) + P_1\left[r^2 + 2(v - v_0)^2\right] + 2P_2 (v - v_0)(u - u_0) \\
\Delta u &= (u - u_0)\left(k_1 r^2 + k_2 r^4 + \cdots\right) + P_2\left[r^2 + 2(u - u_0)^2\right] + 2P_1 (v - v_0)(u - u_0)
\end{aligned}\right\}
$$

where $u,\ v,\ u_0,\ v_0$ are defined as before;
$k_1,\ k_2$ are the symmetric radial distortion coefficients to be determined;
$P_1,\ P_2$ are the tangential distortion coefficients to be determined.
The radial distance $r$ of an image point is computed as:
$$
r = \sqrt{(u - u_0)^2 + (v - v_0)^2}
$$
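This distortion model translates directly into code. The sketch below evaluates $\Delta u, \Delta v$ for given trial coefficients (the argument values would come from the current iteration):

def distortion(u, v, u0, v0, k1, k2, p1, p2):
    # Radial (k1, k2) and tangential (P1, P2) corrections, following the formulas above
    du, dv = u - u0, v - v0
    r2 = du*du + dv*dv
    radial = k1*r2 + k2*r2*r2
    delta_u = du*radial + p2*(r2 + 2*du*du) + 2*p1*du*dv
    delta_v = dv*radial + p1*(r2 + 2*dv*dv) + 2*p2*du*dv
    return delta_u, delta_v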
The L matrix has 11 parameters; together with the four distortion coefficients there are 15 unknowns in total, so at least 8 control points are needed. Starting from the initial L matrix and principal point obtained in section 3, the L matrix and the distortion coefficients are solved for iteratively. The focal length is then obtained from:
$$
f = \sqrt{\frac{l_1^2 + l_2^2 + l_3^2}{l_9^2 + l_{10}^2 + l_{11}^2} - u_0^2}
$$
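In code, the focal length follows from the same coefficients once $u_0$ is known (0-based indexing as before, a minimal sketch):

import numpy as np

def focal_length(l, u0):
    # f from l1..l3, l9..l11 and the principal point u0 (formula above)
    num = l[0]**2 + l[1]**2 + l[2]**2
    den = l[8]**2 + l[9]**2 + l[10]**2
    return np.sqrt(num / den - u0**2)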

5. Code Implementation

5.1 C++
#include <iostream>
#include <vector>
#include <stdexcept>
#include <Eigen/Dense>
#include <unsupported/Eigen/NonLinearOptimization>
#include <unsupported/Eigen/NumericalDiff>

using namespace Eigen;

class CameraCalibrator {
private:
    struct ControlPoint {
        Vector3d world;
        Vector2d image;
    };

    std::vector<ControlPoint> points;
    Matrix4d T;          // 3D normalization transform (homogeneous 4x4)
    Matrix3d U, U_inv;   // 2D normalization transform and its inverse
    VectorXd L; // [l1-l11] + [k1,k2,p1,p2]
    Vector4d distortion_coeffs;
    double focal_length;
    Vector2d principal_point;

    //---------------------- Core computation ----------------------
    // Build the 2n x 12 DLT coefficient matrix (homogeneous form A * l = 0)
    MatrixXd buildAMatrix() {
        MatrixXd A(2*points.size(), 12);
        for (size_t i=0; i<points.size(); ++i) {
            const auto& p = points[i];
            const double X = p.world.x(), Y = p.world.y(), Z = p.world.z();
            const double u = p.image.x(), v = p.image.y();

            A.row(2*i)   << X, Y, Z, 1, 0, 0, 0, 0, -u*X, -u*Y, -u*Z, -u;
            A.row(2*i+1) << 0, 0, 0, 0, X, Y, Z, 1, -v*X, -v*Y, -v*Z, -v;
        }
        return A;
    }

    void computeNormalization() {
        // 3D normalization: translate to the centroid and scale so that
        // the average distance from the origin becomes sqrt(3)
        Vector3d centroid_3d = Vector3d::Zero();
        for (const auto& p : points) centroid_3d += p.world;
        centroid_3d /= points.size();

        double avg_distance = 0;
        for (const auto& p : points) avg_distance += (p.world - centroid_3d).norm();
        avg_distance /= points.size();
        const double scale_3d = std::sqrt(3.0) / avg_distance;

        T = Matrix4d::Identity();
        T.block<3,3>(0,0) *= scale_3d;
        T.block<3,1>(0,3) = -centroid_3d * scale_3d;

        // 2D normalization: translate to the centroid and scale so that
        // the average distance from the origin becomes sqrt(2)
        Vector2d centroid_2d = Vector2d::Zero();
        for (const auto& p : points) centroid_2d += p.image;
        centroid_2d /= points.size();

        avg_distance = 0;
        for (const auto& p : points) avg_distance += (p.image - centroid_2d).norm();
        avg_distance /= points.size();
        const double scale_2d = std::sqrt(2.0) / avg_distance;

        U = Matrix3d::Identity();
        U.block<2,2>(0,0) *= scale_2d;
        U.block<2,1>(0,2) = -centroid_2d * scale_2d;
        U_inv = U.inverse();

        // Apply both transforms to the stored points
        for (auto& p : points) {
            p.world = (p.world - centroid_3d) * scale_3d;
            p.image = (p.image - centroid_2d) * scale_2d;
        }
    }

    //---------------------- Nonlinear optimization ----------------------
    // Functor interface expected by Eigen::NumericalDiff / Eigen::LevenbergMarquardt
    struct OptimizationFunctor {
        typedef double Scalar;
        typedef VectorXd InputType;
        typedef VectorXd ValueType;
        typedef MatrixXd JacobianType;
        enum { InputsAtCompileTime = Dynamic, ValuesAtCompileTime = Dynamic };

        CameraCalibrator& calibrator;
        int m_inputs, m_values;

        OptimizationFunctor(CameraCalibrator& cc)
            : calibrator(cc), m_inputs(15), m_values(2 * static_cast<int>(cc.points.size())) {}

        int inputs() const { return m_inputs; }
        int values() const { return m_values; }

        int operator()(const VectorXd& params, VectorXd& residuals) const {
            const VectorXd L = params.head(11);
            const double k1 = params[11], k2 = params[12];
            const double p1 = params[13], p2 = params[14];

            // Principal point from the current L estimate
            const double denom = L[8]*L[8] + L[9]*L[9] + L[10]*L[10];
            const double u0 = -(L[0]*L[8] + L[1]*L[9] + L[2]*L[10]) / denom;
            const double v0 = -(L[4]*L[8] + L[5]*L[9] + L[6]*L[10]) / denom;

            for (size_t i=0; i<calibrator.points.size(); ++i) {
                const Vector3d& P = calibrator.points[i].world;
                const Vector2d& p_obs = calibrator.points[i].image;

                // Projection
                const double denominator = L[8]*P.x() + L[9]*P.y() + L[10]*P.z() + 1;
                const double u_proj = (L[0]*P.x() + L[1]*P.y() + L[2]*P.z() + L[3]) / denominator;
                const double v_proj = (L[4]*P.x() + L[5]*P.y() + L[6]*P.z() + L[7]) / denominator;

                // Distortion
                const double dx = u_proj - u0;
                const double dy = v_proj - v0;
                const double r2 = dx*dx + dy*dy;
                const double radial = 1 + k1*r2 + k2*r2*r2;

                const double x_dist = dx*radial + 2*p1*dx*dy + p2*(r2 + 2*dx*dx);
                const double y_dist = dy*radial + p1*(r2 + 2*dy*dy) + 2*p2*dx*dy;

                // Residuals
                residuals[2*i]   = p_obs.x() - (u0 + x_dist);
                residuals[2*i+1] = p_obs.y() - (v0 + y_dist);
            }
            return 0;
        }
    };

    void performOptimization() {
        OptimizationFunctor functor(*this);
        NumericalDiff<OptimizationFunctor> numDiff(functor);
        LevenbergMarquardt<NumericalDiff<OptimizationFunctor>> lm(numDiff);

        lm.parameters.maxfev = 1000;
        lm.minimize(L);
    }

    //---------------------- Parameter extraction ----------------------
    void computeIntrinsicParameters() {
        // De-normalize the projection matrix: P = U^{-1} * P_norm * T,
        // then rescale so that P(2,3) = 1 (the DLT convention l12 = 1)
        Matrix<double,3,4> P_norm;
        P_norm << L[0], L[1], L[2],  L[3],
                  L[4], L[5], L[6],  L[7],
                  L[8], L[9], L[10], 1.0;
        Matrix<double,3,4> P = U_inv * P_norm * T;
        P /= P(2,3);

        // Principal point
        const double denom = P(2,0)*P(2,0) + P(2,1)*P(2,1) + P(2,2)*P(2,2);
        principal_point.x() = -(P(0,0)*P(2,0) + P(0,1)*P(2,1) + P(0,2)*P(2,2)) / denom;
        principal_point.y() = -(P(1,0)*P(2,0) + P(1,1)*P(2,1) + P(1,2)*P(2,2)) / denom;

        // Focal length
        const double numerator = P(0,0)*P(0,0) + P(0,1)*P(0,1) + P(0,2)*P(0,2);
        focal_length = std::sqrt(numerator/denom - principal_point.x()*principal_point.x());

        // Distortion coefficients (estimated in normalized image coordinates)
        distortion_coeffs << L[11], L[12], L[13], L[14];
    }

public:
    CameraCalibrator() : L(15), focal_length(0) {
        L.setZero();
    }

    void addControlPoint(const Vector3d& world, const Vector2d& image) {
        points.push_back({world, image});
    }

    void calibrate() {
        if (points.size() < 8)
            throw std::runtime_error("At least 8 control points are required for calibration");

        computeNormalization();
        const MatrixXd A = buildAMatrix();

        // DLT initial value: the right singular vector of the smallest singular
        // value is the null vector of A (up to scale); rescale so that l12 = 1
        JacobiSVD<MatrixXd> svd(A, ComputeThinV);
        VectorXd h = svd.matrixV().col(11);
        L.head(11) = h.head(11) / h(11);

        performOptimization();
        computeIntrinsicParameters();
    }

    //---------------------- Result accessors ----------------------
    double getFocalLength() const { return focal_length; }
    Vector2d getPrincipalPoint() const { return principal_point; }
    Vector4d getDistortionCoefficients() const { return distortion_coeffs; }
    Matrix3d getIntrinsicMatrix() const {
        Matrix3d K = Matrix3d::Identity();
        K(0,0) = K(1,1) = focal_length;
        K(0,2) = principal_point.x();
        K(1,2) = principal_point.y();
        return K;
    }
};

// Usage example
int main() {
    CameraCalibrator calibrator;

    // Add control points
    calibrator.addControlPoint({1, 2, 3}, {400, 300});
    calibrator.addControlPoint({4, 5, 6}, {500, 400});
    // Add more control points ...

    try {
        calibrator.calibrate();
        
        std::cout << "Intrinsic matrix:\n" << calibrator.getIntrinsicMatrix() << "\n\n"
                  << "Principal point: " << calibrator.getPrincipalPoint().transpose() << "\n"
                  << "Focal length: " << calibrator.getFocalLength() << "\n"
                  << "Distortion coefficients [k1, k2, p1, p2]: " 
                  << calibrator.getDistortionCoefficients().transpose() << std::endl;
    } catch (const std::exception& e) {
        std::cerr << "Calibration error: " << e.what() << std::endl;
    }

    return 0;
}
5.2 Python
import numpy as np
from scipy.optimize import least_squares

class CameraCalibrator:
    def __init__(self):
        self.points = []
        self.T = np.eye(4)       # 3D normalization matrix (homogeneous form)
        self.U = np.eye(3)       # 2D normalization matrix
        self.L = np.zeros(15)    # parameter vector: [l1-l11, k1, k2, p1, p2]
        self.focal = 0.0         # focal length
        self.principal_point = np.zeros(2)  # principal point (u0, v0)
        self.distortion = np.zeros(4)       # distortion coefficients [k1, k2, p1, p2]

    def add_point(self, world, image):
        """添加3D-2D对应点"""
        self.points.append({
            'world': np.array(world, dtype=np.float64),
            'image': np.array(image, dtype=np.float64)
        })

    def calibrate(self):
        """Run the full calibration pipeline"""
        if len(self.points) < 8:
            raise ValueError("At least 8 control points are required")
        
        # Data preprocessing
        self._normalize_points()
        
        # DLT initial estimate
        A = self._build_A_matrix()
        self.L[:11] = self._solve_DLT(A)
        
        # Nonlinear optimization
        self._optimize_parameters()
        
        # De-normalize parameters
        self._denormalize_parameters()
        
        # Compute intrinsics
        self._compute_intrinsics()

    def _normalize_points(self):
        """Normalize the data"""
        # 3D point normalization
        world_pts = np.array([p['world'] for p in self.points])
        centroid_3d = np.mean(world_pts, axis=0)
        centered_3d = world_pts - centroid_3d
        
        # Scale factor (average distance becomes sqrt(3))
        avg_norm_3d = np.mean(np.linalg.norm(centered_3d, axis=1))
        scale_3d = np.sqrt(3) / avg_norm_3d
        
        # Build the 3D normalization matrix (homogeneous form)
        self.T = np.eye(4)
        self.T[:3, :3] = np.eye(3) * scale_3d
        self.T[:3, 3] = -centroid_3d * scale_3d
        
        # 2D point normalization
        image_pts = np.array([p['image'] for p in self.points])
        centroid_2d = np.mean(image_pts, axis=0)
        centered_2d = image_pts - centroid_2d
        
        # Scale factor (average distance becomes sqrt(2))
        avg_norm_2d = np.mean(np.linalg.norm(centered_2d, axis=1))
        scale_2d = np.sqrt(2) / avg_norm_2d
        
        # Build the 2D normalization matrix
        self.U = np.array([
            [scale_2d, 0, -centroid_2d[0]*scale_2d],
            [0, scale_2d, -centroid_2d[1]*scale_2d],
            [0, 0, 1]
        ])
        
        # Apply the normalization transforms
        for p in self.points:
            # 3D point (homogeneous coordinates)
            world_h = np.append(p['world'], 1)
            p['world_norm'] = (self.T @ world_h)[:3]
            
            # 2D point
            image_h = np.append(p['image'], 1)
            p['image_norm'] = (self.U @ image_h)[:2]

    def _build_A_matrix(self):
        """Build the DLT coefficient matrix (homogeneous form, 12 columns)"""
        n = len(self.points)
        A = np.zeros((2*n, 12))
        
        for i in range(n):
            X, Y, Z = self.points[i]['world_norm']
            u, v = self.points[i]['image_norm']
            
            # First equation of the pair
            A[2*i] = [
                X, Y, Z, 1,
                0, 0, 0, 0,
                -u*X, -u*Y, -u*Z, -u
            ]
            
            # Second equation of the pair
            A[2*i+1] = [
                0, 0, 0, 0,
                X, Y, Z, 1,
                -v*X, -v*Y, -v*Z, -v
            ]
        
        return A

    def _solve_DLT(self, A):
        """Solve for the projection parameters via SVD (null vector, rescaled so l12 = 1)"""
        _, _, V = np.linalg.svd(A)
        l = V[-1, :]
        return l[:11] / l[11]

    def _optimize_parameters(self):
        """Levenberg-Marquardt nonlinear refinement"""
        # Initial parameters (distortion coefficients start at 0)
        initial_params = np.zeros(15)
        initial_params[:11] = self.L[:11]
        
        # Run the optimization
        result = least_squares(
            fun=self._compute_residuals,
            x0=initial_params,
            method='lm',
            max_nfev=1000
        )
        
        self.L = result.x

    def _compute_residuals(self, params):
        """Compute residuals (in the normalized coordinate frame)"""
        residuals = []
        L = params[:11]
        k1, k2, p1, p2 = params[11], params[12], params[13], params[14]
        
        # Principal point from the current L estimate
        denom = L[8]**2 + L[9]**2 + L[10]**2
        u0 = -(L[0]*L[8] + L[1]*L[9] + L[2]*L[10]) / denom
        v0 = -(L[4]*L[8] + L[5]*L[9] + L[6]*L[10]) / denom
        
        for p in self.points:
            # Projection
            X, Y, Z = p['world_norm']
            denominator = L[8]*X + L[9]*Y + L[10]*Z + 1
            u_proj = (L[0]*X + L[1]*Y + L[2]*Z + L[3]) / denominator
            v_proj = (L[4]*X + L[5]*Y + L[6]*Z + L[7]) / denominator
            
            # Distortion
            dx = u_proj - u0
            dy = v_proj - v0
            r2 = dx**2 + dy**2
            
            # Radial distortion
            radial = 1 + k1*r2 + k2*r2**2
            
            # Tangential distortion
            x_tangential = 2*p1*dx*dy + p2*(r2 + 2*dx**2)
            y_tangential = p1*(r2 + 2*dy**2) + 2*p2*dx*dy
            
            # Distorted coordinates
            u_dist = u0 + dx*radial + x_tangential
            v_dist = v0 + dy*radial + y_tangential
            
            # Residuals
            u_obs, v_obs = p['image_norm']
            residuals.append(u_obs - u_dist)
            residuals.append(v_obs - v_dist)
        
        return np.array(residuals)

    def _denormalize_parameters(self):
        """Transform the projection parameters back to the original coordinate frames"""
        # De-normalize: P = U^{-1} @ P_norm @ T, then rescale so that P[2, 3] = 1
        L_norm = np.append(self.L[:11], 1.0).reshape(3, 4)
        L_denorm = np.linalg.inv(self.U) @ L_norm @ self.T
        L_denorm /= L_denorm[2, 3]
        self.L[:11] = L_denorm.flatten()[:11]

    def _compute_intrinsics(self):
        """Compute the intrinsic parameters and distortion coefficients"""
        L = self.L[:11]
        denom = L[8]**2 + L[9]**2 + L[10]**2
        
        # Principal point
        self.principal_point[0] = -(L[0]*L[8] + L[1]*L[9] + L[2]*L[10]) / denom
        self.principal_point[1] = -(L[4]*L[8] + L[5]*L[9] + L[6]*L[10]) / denom
        
        # Focal length
        numerator = L[0]**2 + L[1]**2 + L[2]**2
        self.focal = np.sqrt(numerator / denom - self.principal_point[0]**2)
        
        # Distortion coefficients (estimated in the normalized image frame)
        self.distortion = self.L[11:]

    # ---------------------- Result accessors ----------------------
    def get_intrinsic_matrix(self):
        """Return the intrinsic matrix K"""
        return np.array([
            [self.focal, 0, self.principal_point[0]],
            [0, self.focal, self.principal_point[1]],
            [0, 0, 1]
        ])
    
    def get_distortion_coefficients(self):
        """Return the distortion coefficients [k1, k2, p1, p2]"""
        return self.distortion.copy()


if __name__ == "__main__":
    
    calibrator = CameraCalibrator()
    
    # Add control points
    calibrator.add_point([1, 2, 3], [400, 300])
    calibrator.add_point([4, 5, 6], [500, 400])

    
    try:
        # Run the calibration
        calibrator.calibrate()
        
        # Print the results
        print("Intrinsic matrix K:")
        print(calibrator.get_intrinsic_matrix())
        print("\nPrincipal point (u0, v0):", calibrator.principal_point)
        print("Focal length f:", calibrator.focal)
        print("Distortion coefficients [k1, k2, p1, p2]:", calibrator.get_distortion_coefficients())
        
    except Exception as e:
        print(f"Calibration failed: {str(e)}")
