ShaderToy Learning Notes 06: Camera
1. Camera
1.1. Basic Concepts
In shaders, the camera defines view space: it determines how we see the 3D scene. It consists of the following elements:
- Position: the camera's location in 3D space
- Direction: the direction the camera points
- Up Vector: defines which way is up for the camera
- FOV (Field of View): determines how wide the view is (see the sketch after this list)
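Of these, only the first three feed the LookAt matrix built in the next subsection; the listings in this note fold the FOV into the ray direction by hardcoding a z component of -1. As a hedged sketch of how an explicit FOV parameter could be handled (the rayDirection helper is my own, not part of the tutorial code):

```glsl
// Hypothetical helper: turn a normalized pixel coordinate plus a vertical
// FOV (in degrees) into a view-space ray direction. A wider FOV moves the
// image plane closer to the eye (smaller |z|), enlarging the visible cone.
vec3 rayDirection(float fovDeg, vec2 uv) {
    float z = -1.0 / tan(radians(fovDeg) * 0.5); // image-plane distance for this FOV
    return normalize(vec3(uv, z));
}
```

With uv.y spanning [-1, 1], the hardcoded normalize(vec3(uv, -1)) used below corresponds to a vertical FOV of roughly 90°.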
1.2. Building the LookAt Matrix
The following code builds the LookAt matrix. It takes the camera position, the look-at point, and the up vector as parameters, and returns a 3x3 matrix that transforms points from 3D space into view space. upVector is usually (0, 1, 0), but other vectors can be used.
```glsl
mat3 camera(vec3 cameraPos, vec3 lookAtPoint, vec3 upVector) {
    vec3 cd = normalize(lookAtPoint - cameraPos); // camera direction
    vec3 cr = normalize(cross(upVector, cd));     // camera right
    vec3 cu = normalize(cross(cd, cr));           // camera up
    return mat3(-cr, cu, -cd); // convert to a right-handed frame: x right, y up, z out of the screen
}
```
Note: the minus signs in mat3(-cr, cu, -cd) are required. The world coordinate system is right-handed, with x pointing right, y up, and z out of the screen, whereas the frame computed above has cd pointing into the screen (and, with this cross-product order, cr pointing left), so both of those axes must be negated.
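As a quick sanity check of that claim (my own worked example, not from the tutorial), place the camera on the +z axis looking at the origin; the matrix should reduce to the identity:

```glsl
// camera(vec3(0, 0, 3), vec3(0), vec3(0, 1, 0)) evaluates as:
//   cd = normalize(vec3(0) - vec3(0, 0, 3)) = ( 0, 0, -1)  // into the screen
//   cr = cross(vec3(0, 1, 0), cd)           = (-1, 0,  0)  // points left, hence -cr
//   cu = cross(cd, cr)                      = ( 0, 1,  0)
// mat3(-cr, cu, -cd) = mat3(vec3(1,0,0), vec3(0,1,0), vec3(0,0,1)),
// the identity: a camera at +z looking down -z leaves ray directions unchanged.
```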
The view-space coordinate system is shown in the figure below:
(Figure: camera/view space, from LearnOpenGL)
We use a right-handed coordinate system for view space, defined by three vectors (exactly the columns of the camera matrix, as sketched after the list):
- forward: the camera's forward vector, pointing ahead
- right: the camera's right vector, pointing to the right
- up: the camera's up vector, pointing upward
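Since these are the columns of the mat3 returned by camera in section 1.2, they can be read back when needed, for example to move the camera along its own axes. A minimal sketch of my own, assuming the ro (camera position) and lp (look-at point) variables used in the listings below:

```glsl
mat3 cam = camera(ro, lp, vec3(0, 1, 0));
vec3 right   = cam[0];  // first column, -cr: view-space +x
vec3 up      = cam[1];  // second column, cu: view-space +y
vec3 forward = -cam[2]; // third column is -cd, so negate it to get the look direction
```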
1.3. Initial Scene
Create a simple scene containing three cubes of different colors plus a floor, as shown below:
```glsl
#define PIXW (1./iResolution.y)

const int MAX_STEPS = 100;
const float START_DIST = 0.001;
const float MAX_DIST = 100.0;
const float EPSILON = 0.0001;

struct SDFResult
{
    float d;
    vec3 color;
};

mat4 rotationX(float theta)
{
    return mat4(1.0, 0.0, 0.0, 0.0,
                0.0, cos(theta), -sin(theta), 0.0,
                0.0, sin(theta), cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationY(float theta)
{
    return mat4(cos(theta), 0.0, sin(theta), 0.0,
                0.0, 1.0, 0.0, 0.0,
                -sin(theta), 0.0, cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationZ(float theta)
{
    return mat4(cos(theta), -sin(theta), 0.0, 0.0,
                sin(theta), cos(theta), 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0);
}

SDFResult sdBox(vec3 p, vec3 b, vec3 offset, vec3 color)
{
    vec3 q = abs(p - offset) - b;
    return SDFResult(length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0), color);
}

vec3 getBackgroundColor(vec2 uv)
{
    float y = (uv.y + 1.) / 2.; // remap uv.y from [-1,1] to [0,1]
    return mix(vec3(1, 0, 1), vec3(0, 1, 1), y);
}

SDFResult sdSphere(vec3 p, float r, vec3 offset, vec3 color)
{
    return SDFResult(length(p - offset) - r, color);
}

SDFResult sdFloor(vec3 p, vec3 color)
{
    float d = p.y + 1.;
    return SDFResult(d, color);
}

SDFResult minWithColor(SDFResult a, SDFResult b)
{
    if (a.d < b.d) { return a; }
    return b;
}

SDFResult sdScene(vec3 p)
{
    SDFResult result1 = sdBox(p, vec3(1., 1., 1.), vec3(-4, 0.2, -4), vec3(1., 0., 0.));
    SDFResult result2 = sdBox(p, vec3(1., 1., 1.), vec3(0, 0.2, -4), vec3(0., 1., 0.));
    SDFResult result3 = sdBox(p, vec3(1., 1., 1.), vec3(4, 0.2, -4), vec3(0., 0., 1.));
    SDFResult result = minWithColor(result1, result2);
    result = minWithColor(result, result3);
    vec3 floorColor = vec3(1. + 0.7 * mod(floor(p.x) + floor(p.z), 2.0)); // checkerboard
    result = minWithColor(result, sdFloor(p, floorColor));
    return result;
}

// Normal calculation
vec3 calcNormal(vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(e.xyy * sdScene(p + e.xyy).d +
                     e.yyx * sdScene(p + e.yyx).d +
                     e.yxy * sdScene(p + e.yxy).d +
                     e.xxx * sdScene(p + e.xxx).d);
}

SDFResult rayMarch(vec3 ro, vec3 rd, float start, float end)
{
    float d = start;
    SDFResult result;
    for (int i = 0; i < MAX_STEPS; i++) {
        vec3 p = ro + rd * d;
        result = sdScene(p);
        d += result.d;
        if (result.d < EPSILON || d > end) break;
    }
    result.d = d;
    return result;
}

void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // Normalized pixel coordinates (from -1 to 1)
    vec2 uv = (2.0 * fragCoord - iResolution.xy) / iResolution.xx;
    vec3 backgroundColor = vec3(0.835, 1, 1);
    vec3 c = backgroundColor; // alternatively: getBackgroundColor(uv)
    vec3 ro = vec3(0, 0, 3.);          // ray origin that represents camera position
    vec3 rd = normalize(vec3(uv, -1)); // ray direction
    SDFResult result = rayMarch(ro, rd, START_DIST, MAX_DIST);
    float d = result.d;
    if (d < MAX_DIST) {
        // Diffuse shading from a point light
        vec3 p = ro + rd * d;
        vec3 n = calcNormal(p);
        vec3 lightPosition = vec3(2, 2, 7);
        // vec3 light_direction = normalize(vec3(1, 0, 5)); // directional alternative
        vec3 light_direction = normalize(lightPosition - p);
        vec3 light_color = vec3(1, 1, 1);
        float diffuse = max(0.0, dot(n, light_direction));
        diffuse = clamp(diffuse, 0.1, 1.0);
        c = light_color * diffuse * result.color + backgroundColor * 0.2;
    }
    // Output to screen
    fragColor = vec4(c, 1.0);
}
```
1.4. Moving the Camera to the Right
Core code
```glsl
void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // other code ...
    vec3 c = backgroundColor;
    vec3 lp = vec3(0, 0.2, -4); // look-at point
    vec3 ro = vec3(5, 0, 3.);   // ray origin that represents camera position
    vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1)); // ray direction
}
```
Adjusting the arguments of the camera function controls the camera's position and orientation. Here lp is the point the camera looks at, i.e. the green cube in the figure, which therefore always stays at the center of the frame.
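To see the sideways motion rather than a fixed offset, ro.x can be animated with iTime. This is my own variation on the snippet above; the tutorial uses the static ro = vec3(5, 0, 3.):

```glsl
vec3 lp = vec3(0, 0.2, -4);
vec3 ro = vec3(sin(iTime) * 5., 0, 3.); // sweep x back and forth through [-5, 5]
vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1));
```

Because lp stays fixed while ro slides, the view direction is re-aimed every frame and the green cube stays centered.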
1.5. Moving the Camera Forward and Backward
Core code
```glsl
void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // other code ...
    vec3 c = backgroundColor;
    vec3 lp = vec3(0, 0.2, -4);
    vec3 ro = vec3(0, 0, 3. + sin(iTime) * 2.); // ray origin that represents camera position
    vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1)); // ray direction
}
```
That is, we simply move the camera position.
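Note that this moves ro along the world z axis, which only matches a true dolly while the camera happens to look roughly down -z. A variation that moves along the camera's own forward direction (cd, computed the same way as in camera()) works from any orientation; a small sketch of my own, not from the tutorial:

```glsl
vec3 lp = vec3(0, 0.2, -4);
vec3 ro = vec3(0, 0, 3.);
vec3 cd = normalize(lp - ro); // camera forward, as in camera()
ro += cd * sin(iTime) * 2.;   // dolly back and forth along the view direction
vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1));
```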
Full code
```glsl
#define PIXW (1./iResolution.y)

const int MAX_STEPS = 100;
const float START_DIST = 0.001;
const float MAX_DIST = 100.0;
const float EPSILON = 0.0001;

struct SDFResult
{
    float d;
    vec3 color;
};

mat4 rotationX(float theta)
{
    return mat4(1.0, 0.0, 0.0, 0.0,
                0.0, cos(theta), -sin(theta), 0.0,
                0.0, sin(theta), cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationY(float theta)
{
    return mat4(cos(theta), 0.0, sin(theta), 0.0,
                0.0, 1.0, 0.0, 0.0,
                -sin(theta), 0.0, cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationZ(float theta)
{
    return mat4(cos(theta), -sin(theta), 0.0, 0.0,
                sin(theta), cos(theta), 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0);
}

SDFResult sdBox(vec3 p, vec3 b, vec3 offset, vec3 color)
{
    vec3 q = abs(p - offset) - b;
    return SDFResult(length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0), color);
}

vec3 getBackgroundColor(vec2 uv)
{
    float y = (uv.y + 1.) / 2.; // remap uv.y from [-1,1] to [0,1]
    return mix(vec3(1, 0, 1), vec3(0, 1, 1), y);
}

SDFResult sdSphere(vec3 p, float r, vec3 offset, vec3 color)
{
    return SDFResult(length(p - offset) - r, color);
}

SDFResult sdFloor(vec3 p, vec3 color)
{
    float d = p.y + 1.;
    return SDFResult(d, color);
}

SDFResult minWithColor(SDFResult a, SDFResult b)
{
    if (a.d < b.d) { return a; }
    return b;
}

mat3 camera(vec3 cameraPos, vec3 lookAtPoint, vec3 upVector) {
    vec3 cd = normalize(lookAtPoint - cameraPos); // camera direction
    vec3 cr = normalize(cross(upVector, cd));     // camera right
    vec3 cu = normalize(cross(cd, cr));           // camera up
    return mat3(-cr, cu, -cd); // convert to a right-handed frame: x right, y up, z out of the screen
}

SDFResult sdScene(vec3 p)
{
    SDFResult result1 = sdBox(p, vec3(1., 1., 1.), vec3(-4, 0.2, -4), vec3(1., 0., 0.));
    SDFResult result2 = sdBox(p, vec3(1., 1., 1.), vec3(0, 0.2, -4), vec3(0., 1., 0.));
    SDFResult result3 = sdBox(p, vec3(1., 1., 1.), vec3(4, 0.2, -4), vec3(0., 0., 1.));
    SDFResult result = minWithColor(result1, result2);
    result = minWithColor(result, result3);
    vec3 floorColor = vec3(1. + 0.7 * mod(floor(p.x) + floor(p.z), 2.0)); // checkerboard
    result = minWithColor(result, sdFloor(p, floorColor));
    return result;
}

// Normal calculation
vec3 calcNormal(vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(e.xyy * sdScene(p + e.xyy).d +
                     e.yyx * sdScene(p + e.yyx).d +
                     e.yxy * sdScene(p + e.yxy).d +
                     e.xxx * sdScene(p + e.xxx).d);
}

SDFResult rayMarch(vec3 ro, vec3 rd, float start, float end)
{
    float d = start;
    SDFResult result;
    for (int i = 0; i < MAX_STEPS; i++) {
        vec3 p = ro + rd * d;
        result = sdScene(p);
        d += result.d;
        if (result.d < EPSILON || d > end) break;
    }
    result.d = d;
    return result;
}

void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // Normalized pixel coordinates (from -1 to 1)
    vec2 uv = (2.0 * fragCoord - iResolution.xy) / iResolution.xx;
    vec3 backgroundColor = vec3(0.835, 1, 1);
    vec3 c = backgroundColor; // alternatively: getBackgroundColor(uv)
    vec3 lp = vec3(0, 0.2, -4);
    vec3 ro = vec3(0, 0, 3. + sin(iTime) * 2.); // ray origin that represents camera position
    vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1)); // ray direction
    SDFResult result = rayMarch(ro, rd, START_DIST, MAX_DIST);
    float d = result.d;
    if (d < MAX_DIST) {
        // Diffuse shading from a point light
        vec3 p = ro + rd * d;
        vec3 n = calcNormal(p);
        vec3 lightPosition = vec3(2, 2, 7);
        // vec3 light_direction = normalize(vec3(1, 0, 5)); // directional alternative
        vec3 light_direction = normalize(lightPosition - p);
        vec3 light_color = vec3(1, 1, 1);
        float diffuse = max(0.0, dot(n, light_direction));
        diffuse = clamp(diffuse, 0.1, 1.0);
        c = light_color * diffuse * result.color + backgroundColor * 0.2;
    }
    // Output to screen
    fragColor = vec4(c, 1.0);
}
```
1.6. Orbiting the Camera Around an Object
The result looks like this:
Approach:
We only consider rotation in the xz plane; y stays fixed, so only the x and z coordinates need to change.
Seen from above, the camera travels along a circular path (the black circle).
Something like the figure below; you can experiment with the circular path using the chart created on Desmos. Imagine the green cube sitting at the center of the circle.
Core code
```glsl
vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
float theta = iTime * 0.5;
float cameraRadius = 10.;
// circle of radius cameraRadius centered on lp, parameterized by angle theta
ro.x = cameraRadius * cos(theta) + lp.x;
ro.z = cameraRadius * sin(theta) + lp.z;
```
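The orbit can also be driven interactively: ShaderToy exposes the last click/drag position in pixels in iMouse.xy, so mapping the horizontal mouse coordinate to theta lets you drag the camera around lp. A minimal sketch of my own, not part of the tutorial:

```glsl
// Drag horizontally to orbit: map mouse x across the screen to one full turn.
// iMouse.xy is (0, 0) before the first click.
float theta = iMouse.x / iResolution.x * 6.28318530718; // [0, 2*PI]
float cameraRadius = 10.;
vec3 ro = vec3(0, 0, 3);
ro.x = cameraRadius * cos(theta) + lp.x; // same circle around lp as above
ro.z = cameraRadius * sin(theta) + lp.z;
```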
Full code
```glsl
#define PIXW (1./iResolution.y)

const int MAX_STEPS = 100;
const float START_DIST = 0.001;
const float MAX_DIST = 100.0;
const float EPSILON = 0.0001;

struct SDFResult
{
    float d;
    vec3 color;
};

mat4 rotationX(float theta)
{
    return mat4(1.0, 0.0, 0.0, 0.0,
                0.0, cos(theta), -sin(theta), 0.0,
                0.0, sin(theta), cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationY(float theta)
{
    return mat4(cos(theta), 0.0, sin(theta), 0.0,
                0.0, 1.0, 0.0, 0.0,
                -sin(theta), 0.0, cos(theta), 0.0,
                0.0, 0.0, 0.0, 1.0);
}

mat4 rotationZ(float theta)
{
    return mat4(cos(theta), -sin(theta), 0.0, 0.0,
                sin(theta), cos(theta), 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0);
}

SDFResult sdBox(vec3 p, vec3 b, vec3 offset, vec3 color)
{
    vec3 q = abs(p - offset) - b;
    return SDFResult(length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0), color);
}

vec3 getBackgroundColor(vec2 uv)
{
    float y = (uv.y + 1.) / 2.; // remap uv.y from [-1,1] to [0,1]
    return mix(vec3(1, 0, 1), vec3(0, 1, 1), y);
}

SDFResult sdSphere(vec3 p, float r, vec3 offset, vec3 color)
{
    return SDFResult(length(p - offset) - r, color);
}

SDFResult sdFloor(vec3 p, vec3 color)
{
    float d = p.y + 1.;
    return SDFResult(d, color);
}

SDFResult minWithColor(SDFResult a, SDFResult b)
{
    if (a.d < b.d) { return a; }
    return b;
}

mat3 camera(vec3 cameraPos, vec3 lookAtPoint, vec3 upVector) {
    vec3 cd = normalize(lookAtPoint - cameraPos); // camera direction
    vec3 cr = normalize(cross(upVector, cd));     // camera right
    vec3 cu = normalize(cross(cd, cr));           // camera up
    return mat3(-cr, cu, -cd); // convert to a right-handed frame: x right, y up, z out of the screen
}

SDFResult sdScene(vec3 p)
{
    SDFResult result1 = sdBox(p, vec3(1., 1., 1.), vec3(-4, 0.2, -4), vec3(1., 0., 0.));
    SDFResult result2 = sdBox(p, vec3(1., 1., 1.), vec3(0, 0.2, -4), vec3(0., 1., 0.));
    SDFResult result3 = sdBox(p, vec3(1., 1., 1.), vec3(4, 0.2, -4), vec3(0., 0., 1.));
    SDFResult result = minWithColor(result1, result2);
    result = minWithColor(result, result3);
    vec3 floorColor = vec3(1. + 0.7 * mod(floor(p.x) + floor(p.z), 2.0)); // checkerboard
    result = minWithColor(result, sdFloor(p, floorColor));
    return result;
}

// Normal calculation
vec3 calcNormal(vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(e.xyy * sdScene(p + e.xyy).d +
                     e.yyx * sdScene(p + e.yyx).d +
                     e.yxy * sdScene(p + e.yxy).d +
                     e.xxx * sdScene(p + e.xxx).d);
}

SDFResult rayMarch(vec3 ro, vec3 rd, float start, float end)
{
    float d = start;
    SDFResult result;
    for (int i = 0; i < MAX_STEPS; i++) {
        vec3 p = ro + rd * d;
        result = sdScene(p);
        d += result.d;
        if (result.d < EPSILON || d > end) break;
    }
    result.d = d;
    return result;
}

void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // Normalized pixel coordinates (from -1 to 1)
    vec2 uv = (2.0 * fragCoord - iResolution.xy) / iResolution.xx;
    vec3 backgroundColor = vec3(0.835, 1, 1);
    vec3 c = backgroundColor; // alternatively: getBackgroundColor(uv)
    vec3 lp = vec3(0, 0.2, -4);
    vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
    float theta = iTime * 0.5;
    float cameraRadius = 10.;
    ro.x = cameraRadius * cos(theta) + lp.x; // orbit around lp
    ro.z = cameraRadius * sin(theta) + lp.z;
    vec3 rd = camera(ro, lp, vec3(0, 1, 0)) * normalize(vec3(uv, -1)); // ray direction
    SDFResult result = rayMarch(ro, rd, START_DIST, MAX_DIST);
    float d = result.d;
    if (d < MAX_DIST) {
        // Diffuse shading from a point light
        vec3 p = ro + rd * d;
        vec3 n = calcNormal(p);
        vec3 lightPosition = vec3(2, 2, 7);
        // vec3 light_direction = normalize(vec3(1, 0, 5)); // directional alternative
        vec3 light_direction = normalize(lightPosition - p);
        vec3 light_color = vec3(1, 1, 1);
        float diffuse = max(0.0, dot(n, light_direction));
        diffuse = clamp(diffuse, 0.1, 1.0);
        c = light_color * diffuse * result.color + backgroundColor * 0.2;
    }
    // Output to screen
    fragColor = vec4(c, 1.0);
}
```
1.7. References
- 10.1 Camera Model with a Look-At Point | Shadertoy中文教程
- 10.2 Rotating the Camera Around a Target | Shadertoy中文教程
- LearnOpenGL - Camera