LearnOpenGL Camera

Author: li_礼光 | Published 2020-09-30 17:17

Camera

A quick-and-dirty way to think about a camera:

I hold a camera, point it at a person or an object, fix it in one pose, and take a picture. What matters here is the relationship between the camera and the object being photographed.

(Figure: camera)

PS: the (0,0,0) in the figure should be read as the default position of the object.

The relationship between the camera and the object

The quick-and-dirty view:
1. Where the camera is (its position)
2. How the camera is oriented (shooting in landscape or in portrait)
3. Where the photographed object is (its position)

The textbook view (a sketch of computing these follows the list):
1. Camera position
2. Camera direction
3. Right axis
4. Up axis
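To make the textbook terms concrete, here is a minimal sketch (my own illustration with GLM, not code from the original post) that derives the camera direction, right axis and up axis from a camera position, a target and a world up vector:

#include <glm/glm.hpp> // vec3, normalize, cross

glm::vec3 cameraPos    = glm::vec3(0.0f, 0.0f, 3.0f); // 1. camera position
glm::vec3 cameraTarget = glm::vec3(0.0f, 0.0f, 0.0f); // the object being looked at, default (0,0,0)
glm::vec3 worldUp      = glm::vec3(0.0f, 1.0f, 0.0f); // world "up", used to derive the camera axes

// 2. camera direction: by convention it points from the target towards the camera (+z of camera space)
glm::vec3 cameraDirection = glm::normalize(cameraPos - cameraTarget);
// 3. right axis: perpendicular to the world up vector and the camera direction
glm::vec3 cameraRight = glm::normalize(glm::cross(worldUp, cameraDirection));
// 4. up axis: perpendicular to the other two
glm::vec3 cameraUpAxis = glm::cross(cameraDirection, cameraRight);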



LookAt

One of the advantages of matrices is that if you define a coordinate space with three mutually perpendicular axes, you can combine those three axes with a translation vector to build a matrix, and multiplying any vector by that matrix transforms it into that coordinate space. This is exactly what the LookAt matrix does: now that we have three perpendicular axes and a position defining the camera space, we can create our own LookAt matrix:

glm::mat4 view;
view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),  // eye: camera position
                   glm::vec3(0.0f, 0.0f, 0.0f),  // center: the target being looked at
                   glm::vec3(0.0f, 1.0f, 0.0f)); // up: world up vector
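For reference, the same matrix can be assembled by hand from the axes (a sketch of my own, assuming GLM's column-major mat[column][row] indexing; it reuses cameraPos, cameraDirection, cameraRight and cameraUpAxis from the earlier sketch):

// rotation part: the camera axes written into the rows of the matrix
glm::mat4 rotation = glm::mat4(1.0f);
rotation[0][0] = cameraRight.x;     rotation[1][0] = cameraRight.y;     rotation[2][0] = cameraRight.z;
rotation[0][1] = cameraUpAxis.x;    rotation[1][1] = cameraUpAxis.y;    rotation[2][1] = cameraUpAxis.z;
rotation[0][2] = cameraDirection.x; rotation[1][2] = cameraDirection.y; rotation[2][2] = cameraDirection.z;

// translation part: move the whole world by the negative camera position
glm::mat4 translation = glm::mat4(1.0f);
translation[3][0] = -cameraPos.x;
translation[3][1] = -cameraPos.y;
translation[3][2] = -cameraPos.z;

// should produce the same matrix as glm::lookAt(cameraPos, cameraTarget, worldUp)
glm::mat4 manualLookAt = rotation * translation;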

Put simply, what the camera provides corresponds to the view matrix (view) in the shader from the earlier post LearnOpenGL 坐标系统(1):

 V_clip = M_projection ⋅ M_view ⋅ M_model ⋅ V_local
 
 which in the shader corresponds to:
 
 gl_Position = projection * view * model * vec4(aPos, 1.0);
 
 Note that the matrices are applied in the reverse of the order in which they are written (remember to read the multiplication from right to left).
 The resulting vertex is assigned to gl_Position in the vertex shader, and OpenGL then performs perspective division and clipping automatically.
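As a small illustration of that right-to-left order (my own sketch with identity placeholder matrices, not code from the post), the composed product applies the model matrix to the vertex first, then the view matrix, then the projection matrix:

glm::mat4 model(1.0f), view(1.0f), projection(1.0f);  // placeholder matrices
glm::vec4 local = glm::vec4(0.5f, 0.5f, 0.0f, 1.0f);  // a vertex in local space

glm::vec4 world   = model * local;        // local -> world space (model matrix acts first)
glm::vec4 viewPos = view * world;         // world -> view (camera) space
glm::vec4 clip    = projection * viewPos; // view  -> clip space
// identical to the one-liner: glm::vec4 clip2 = projection * view * model * local;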

glm::lookAt

Function declaration:

/// Build a look at view matrix based on the default handedness.
///
/// @param eye Position of the camera
/// @param center Position where the camera is looking at
/// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1)
///
/// @tparam T A floating-point scalar type
/// @tparam Q A value from qualifier enum
GLM_FUNC_DECL mat<4, 4, T, Q> lookAt(
  vec<3, T, Q> const& eye,      // camera position
  vec<3, T, Q> const& center,   // target
  vec<3, T, Q> const& up);      // up vector
Once you understand these three parameters, the rest is straightforward (a sketch follows the list).
  • Camera position: the coordinates where you are holding the camera (with the target object as the frame of reference)
  • Target: the object being photographed, usually placed at the center, e.g. the default (0,0,0)
  • Up vector: how you hold the camera, upright or sideways
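For example, keeping the target and the up vector fixed while moving the eye along a circle gives the classic rotating camera (a sketch of my own; the radius/camX/camZ variables also appear, unused, in the program further down):

// orbit the camera around the origin in the xz-plane, always looking at the origin
GLfloat radius = 2.0f;
GLfloat camX = sin(glfwGetTime()) * radius;
GLfloat camZ = cos(glfwGetTime()) * radius;

glm::mat4 view = glm::lookAt(glm::vec3(camX, 0.0f, camZ),  // eye: moves on a circle of radius 2
                             glm::vec3(0.0f, 0.0f, 0.0f),  // center: always the origin
                             glm::vec3(0.0f, 1.0f, 0.0f)); // up: +y, the camera is held upright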


Practice

Rendering the scene through a LookAt camera

Shader

#define STRINGIZE(x) #x
#define SHADER(shader) STRINGIZE(shader)

/// Vertex shader: passes the vertex data through and hands the texture coordinates to the fragment shader
static char *myCameraVertexShaderStr = "#version 330 core\n" SHADER(
    layout (location = 0) in vec3 position;  // vertex position input
    layout (location = 1) in vec2 texCoords; // texture coordinate input (2D)
                                              
    out vec2 vertexTexCoords; // texture coordinates passed on to the fragment shader
                                                 
    uniform mat4 myProjection; // projection matrix
    uniform mat4 myView;       // view matrix
    uniform mat4 myModel;      // model matrix
                               
    void main()
    {
        gl_Position = myProjection * myView * myModel * vec4(position, 1.0f);
        vertexTexCoords = texCoords;
    }
);

// Fragment shader
static char *myCameraFragmentShaderSrc = "#version 330 core\n" SHADER(
    in vec2 vertexTexCoords; // texture coordinates received from the vertex shader
                        
    uniform sampler2D myTexture;
                                                               
    out vec4 FragColor;
                                                               
    void main()
    {
      FragColor = texture(myTexture, vertexTexCoords);
    }
);

Vertex data

The same cube as in the coordinate systems post: 36 vertices, each made of 5 floats (x, y, z position plus u, v texture coordinates).

GLfloat myCameraVertices[] = {
   -0.5f, -0.5f, -0.5f,  0.0f, 0.0f,
    0.5f, -0.5f, -0.5f,  1.0f, 0.0f,
    0.5f,  0.5f, -0.5f,  1.0f, 1.0f,
    0.5f,  0.5f, -0.5f,  1.0f, 1.0f,
   -0.5f,  0.5f, -0.5f,  0.0f, 1.0f,
   -0.5f, -0.5f, -0.5f,  0.0f, 0.0f,

   -0.5f, -0.5f,  0.5f,  0.0f, 0.0f,
    0.5f, -0.5f,  0.5f,  1.0f, 0.0f,
    0.5f,  0.5f,  0.5f,  1.0f, 1.0f,
    0.5f,  0.5f,  0.5f,  1.0f, 1.0f,
   -0.5f,  0.5f,  0.5f,  0.0f, 1.0f,
   -0.5f, -0.5f,  0.5f,  0.0f, 0.0f,

   -0.5f,  0.5f,  0.5f,  1.0f, 0.0f,
   -0.5f,  0.5f, -0.5f,  1.0f, 1.0f,
   -0.5f, -0.5f, -0.5f,  0.0f, 1.0f,
   -0.5f, -0.5f, -0.5f,  0.0f, 1.0f,
   -0.5f, -0.5f,  0.5f,  0.0f, 0.0f,
   -0.5f,  0.5f,  0.5f,  1.0f, 0.0f,

    0.5f,  0.5f,  0.5f,  1.0f, 0.0f,
    0.5f,  0.5f, -0.5f,  1.0f, 1.0f,
    0.5f, -0.5f, -0.5f,  0.0f, 1.0f,
    0.5f, -0.5f, -0.5f,  0.0f, 1.0f,
    0.5f, -0.5f,  0.5f,  0.0f, 0.0f,
    0.5f,  0.5f,  0.5f,  1.0f, 0.0f,

   -0.5f, -0.5f, -0.5f,  0.0f, 1.0f,
    0.5f, -0.5f, -0.5f,  1.0f, 1.0f,
    0.5f, -0.5f,  0.5f,  1.0f, 0.0f,
    0.5f, -0.5f,  0.5f,  1.0f, 0.0f,
   -0.5f, -0.5f,  0.5f,  0.0f, 0.0f,
   -0.5f, -0.5f, -0.5f,  0.0f, 1.0f,

   -0.5f,  0.5f, -0.5f,  0.0f, 1.0f,
    0.5f,  0.5f, -0.5f,  1.0f, 1.0f,
    0.5f,  0.5f,  0.5f,  1.0f, 0.0f,
    0.5f,  0.5f,  0.5f,  1.0f, 0.0f,
   -0.5f,  0.5f,  0.5f,  0.0f, 0.0f,
   -0.5f,  0.5f, -0.5f,  0.0f, 1.0f
};

Program

#include <iostream>
#include "MyCamera.hpp"
#include "MyProgram.hpp"
#include "MyCameraShader.hpp"
#include "MyCameraVertices.hpp"
#include "glm.hpp"
#include "matrix_transform.hpp"
#include "type_ptr.hpp"

#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_STATIC
#include "stb_image.h"

int runMyCameraCube() {
    int result = glfwInit();
    if (result == GL_FALSE) {
        printf("glfwInit failed");
        return -1;
    }
    
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
    GLFWwindow *window = glfwCreateWindow(600, 400, "My Opengl Window", NULL, NULL);
    if(!window) {
        printf("Failed to create window");
        glfwTerminate();
        return -1;
    }
    glfwMakeContextCurrent(window);
    gladLoadGLLoader((GLADloadproc)glfwGetProcAddress);
    
    // Create the textured shader program
    MyProgram myProgram = MyProgram(myCameraVertexShaderStr, myCameraFragmentShaderSrc);

    GLuint VBO , VAO ;
    unsigned int squareIndicesCount = 0;
    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glBindVertexArray(VAO);

    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(myCameraVertices), myCameraVertices, GL_STATIC_DRAW);
    
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid*)(0 * sizeof(GLfloat)));
    glEnableVertexAttribArray(0);

    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
    glEnableVertexAttribArray(1);

    glBindVertexArray(0);
    squareIndicesCount = sizeof(myCameraVertices)/(sizeof(myCameraVertices[0]) * 5); // 5 floats per vertex -> 36 vertices
    
    unsigned int texture;
    unsigned char *data;
    int width, height, nrChannels;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    
    data = stbi_load( "/Users/liliguang/Desktop/LearnOpengl/LearnOpenGl/LearnOpenGl/Demo/Common/ImgSources/dizhuan.jpg" , &width, &height, &nrChannels, 0);
    if (data)
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);
        glBindTexture(GL_TEXTURE_2D, 0);
    }
    else
    {
        std::cout << "Failed to load texture" << std::endl;
    }
    stbi_image_free(data);
    
    glEnable(GL_DEPTH_TEST);

    // Render loop
    while(!glfwWindowShouldClose(window)){
        glfwPollEvents();
        
        glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glUseProgram(myProgram.program);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, texture);
        glUniform1i(glGetUniformLocation(myProgram.program, "myTexture"), 0);
        
        glm::mat4 model = glm::mat4(1.0f);
        glm::mat4 view = glm::mat4(1.0f);
        glm::mat4 projection = glm::mat4(1.0f);
        
        GLint myModelLoc = glGetUniformLocation(myProgram.program,"myModel");
        GLint myViewLoc = glGetUniformLocation(myProgram.program,"myView");
        GLint myProjectionLoc = glGetUniformLocation(myProgram.program,"myProjection");
        
        projection = glm::perspective(glm::radians(45.0f), 1.0f, 0.01f, 100.f); // projection matrix: 45° fov, near 0.01, far 100
        glUniformMatrix4fv(myProjectionLoc, 1, GL_FALSE, glm::value_ptr(projection));
        glUniformMatrix4fv(myModelLoc, 1, GL_FALSE, glm::value_ptr(model));
        
        glBindVertexArray(VAO);
 
        // radius/camX/camZ would drive the rotating camera sketched earlier; they are unused here
        GLfloat radius = 2.0f;
        GLfloat camX = sin(glfwGetTime()) * radius;
        GLfloat camZ = cos(glfwGetTime()) * radius;
        glm::vec3 cameraEye     = glm::vec3(0.0f, 0.0f, 4.0f); // camera position
        glm::vec3 cameraCenter  = glm::vec3(0.0f, 0.0f, 0.0f); // target position
        glm::vec3 cameraUp      = glm::vec3(0.0f, 1.0f, 0.0f); // up vector
     
        view = glm::lookAt(cameraEye,       // camera position
                           cameraCenter,    // target
                           cameraUp);       // up vector
        // upload the view matrix
        glUniformMatrix4fv(myViewLoc, 1, GL_FALSE, glm::value_ptr(view));

        glDrawArrays(GL_TRIANGLES, 0, squareIndicesCount);
        glBindVertexArray(0);

        glfwSwapBuffers(window);
    }

    // Clean up
    glfwTerminate();
    
    return 1;
}


Result:

glm::vec3 cameraEye     = glm::vec3(0.0f, 0.0f, 4.0f); // camera position

(Figure: facing one side of the cube head-on)

Changing the camera position:

glm::vec3 cameraEye     = glm::vec3(4.0f, 4.0f, 4.0f); // camera position

(Figure: the cube seen from the new camera position)

Summary:

A first look at changing the view matrix with LookAt.



Window input controls

Move the model (by moving the camera) with keyboard or mouse input.

Idea: everything is still matrix transforms; we adjust the projection matrix and the view matrix to change how the vertex data ends up on screen.

// Movement callback declarations
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode);
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset);
void mouse_callback(GLFWwindow* window, double xpos, double ypos);
void do_movement();

// Global camera LookAt parameters
glm::vec3 cameraEye    = glm::vec3(0.0f, 0.0f,  3.0f); // camera position
glm::vec3 cameraCenter = glm::vec3(0.0f, 0.0f, -1.0f); // used as the camera front (direction) vector below
glm::vec3 cameraUp     = glm::vec3(0.0f, 1.0f,  0.0f); // up vector

// Initial parameter values
GLfloat yaw    = -90.0f;    // Yaw is initialized to -90.0 degrees since a yaw of 0.0 results in a direction vector pointing to the right (due to how Euler angles work), so we initially rotate a bit to the left.
GLfloat pitch  =  0.0f;
GLfloat lastX  =  600.0f  / 2.0;
GLfloat lastY  =  400.0f / 2.0;
GLfloat fov =  45.0f;
bool keys[1024];

GLfloat lastFrame = 0.0f;      // Time of last frame

Movement callback implementations

// Is called whenever a key is pressed/released via GLFW
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode)
{
    if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
        glfwSetWindowShouldClose(window, GL_TRUE);
    if (key >= 0 && key < 1024)
    {
        if (action == GLFW_PRESS)
            keys[key] = true;
        else if (action == GLFW_RELEASE)
            keys[key] = false;
    }
}


// Moves/alters the camera positions based on user input
void do_movement()
{
    // Camera controls
    GLfloat cameraSpeed = 0.05f;
    if (keys[GLFW_KEY_W]) // forward
        cameraEye += cameraSpeed * cameraCenter;
    if (keys[GLFW_KEY_S]) // backward
        cameraEye -= cameraSpeed * cameraCenter;
    if (keys[GLFW_KEY_A]) // strafe left
        cameraEye -= glm::normalize(glm::cross(cameraCenter, cameraUp)) * cameraSpeed;
    if (keys[GLFW_KEY_D]) // strafe right
        cameraEye += glm::normalize(glm::cross(cameraCenter, cameraUp)) * cameraSpeed;
    
}
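The constant cameraSpeed makes movement frame-rate dependent. A common refinement (my own sketch, reusing the lastFrame variable declared above; the helper name update_delta_time is hypothetical) is to scale the speed by the time elapsed since the previous frame:

GLfloat deltaTime = 0.0f; // time between the current frame and the previous frame

// call once per frame, e.g. right after glfwPollEvents()
void update_delta_time()
{
    GLfloat currentFrame = (GLfloat)glfwGetTime();
    deltaTime = currentFrame - lastFrame;
    lastFrame = currentFrame;
}

// then, inside do_movement(), the constant could become something like:
// GLfloat cameraSpeed = 2.5f * deltaTime;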

bool firstMouse = true;
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
    if (firstMouse)
    {
        lastX = xpos;
        lastY = ypos;
        firstMouse = false;
    }

    GLfloat xoffset = xpos - lastX;
    GLfloat yoffset = lastY - ypos; // Reversed since y-coordinates go from bottom to top
    lastX = xpos;
    lastY = ypos;

    GLfloat sensitivity = 0.05;    // Change this value to your liking
    xoffset *= sensitivity;
    yoffset *= sensitivity;

    yaw   += xoffset;
    pitch += yoffset;

    // Make sure that when pitch is out of bounds, screen doesn't get flipped
    if (pitch > 89.0f)
        pitch = 89.0f;
    if (pitch < -89.0f)
        pitch = -89.0f;

    glm::vec3 front;
    front.x = cos(glm::radians(yaw)) * cos(glm::radians(pitch));
    front.y = sin(glm::radians(pitch));
    front.z = sin(glm::radians(yaw)) * cos(glm::radians(pitch));
    cameraCenter = glm::normalize(front);
}
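A quick sanity check on that formula: with the initial yaw = -90° and pitch = 0°, front = (cos(-90°)·cos(0°), sin(0°), sin(-90°)·cos(0°)) = (0, 0, -1), which is exactly the initial value of cameraCenter above, i.e. a camera looking straight down the negative z-axis.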

void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
    if (fov >= 1.0f && fov <= 45.0f)
        fov -= yoffset * 0.05f;
    if (fov <= 1.0f)
        fov = 1.0f;
    if (fov >= 45.0f)
        fov = 45.0f;
}
Modified program

int runMyCameraCube() {
    int result = glfwInit();
    if (result == GL_FALSE) {
        printf("glfwInit failed");
        return -1;
    }
    
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
    GLFWwindow *window = glfwCreateWindow(600, 400, "My Opengl Window", NULL, NULL);
    if(!window) {
        printf("Failed to create window");
        glfwTerminate();
        return -1;
    }
    glfwMakeContextCurrent(window);
    gladLoadGLLoader((GLADloadproc)glfwGetProcAddress);
    
    // Set up (keyboard/mouse) input callbacks
    // Key callback for this window: invoked whenever a key is pressed, repeated or released.
    glfwSetKeyCallback(window, key_callback);
    
    // Cursor position callback: invoked when the cursor moves; it receives the position in
    // screen coordinates relative to the top-left corner of the window's content area.
    glfwSetCursorPosCallback(window, mouse_callback);

    // Scroll callback: invoked for any scrolling device (e.g. a mouse wheel or the scroll
    // area of a touchpad); it receives all scroll input.
    glfwSetScrollCallback(window, scroll_callback);

    // glfwSetInputMode(window, mode, value)
    //   first argument  : the current window
    //   second argument : the input mode to set, here the cursor mode GLFW_CURSOR
    //   third argument  : the value for that mode, here GLFW_CURSOR_DISABLED (hide and capture the cursor)
    glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
    
  
    // Create the textured shader program
    MyProgram myProgram = MyProgram(myCameraVertexShaderStr, myCameraFragmentShaderSrc);

    // Vertex buffers
    GLuint VBO , VAO ;
    unsigned int squareIndicesCount = 0;
    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glBindVertexArray(VAO);

    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(myCameraVertices), myCameraVertices, GL_STATIC_DRAW);
    
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid*)(0 * sizeof(GLfloat)));
    glEnableVertexAttribArray(0);

    // Texture coordinate attribute (interleaved with the positions in the same vertex array)
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
    glEnableVertexAttribArray(1);

    // Unbind the VAO
    glBindVertexArray(0);
    squareIndicesCount = sizeof(myCameraVertices)/(sizeof(myCameraVertices[0]) * 5); // 5 floats per vertex -> 36 vertices
    

    // Create the texture
    unsigned int texture;
    unsigned char *data;
    int width, height, nrChannels;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    
    data = stbi_load( "/Users/liliguang/Desktop/LearnOpengl/LearnOpenGl/LearnOpenGl/Demo/Common/ImgSources/dizhuan.jpg" , &width, &height, &nrChannels, 0);
    if (data)
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);
        glBindTexture(GL_TEXTURE_2D, 0);
    }
    else
    {
        std::cout << "Failed to load texture" << std::endl;
    }
    stbi_image_free(data);
    
    glEnable(GL_DEPTH_TEST);

    // Render loop
    while(!glfwWindowShouldClose(window)){
        glfwPollEvents();
        do_movement();
        
        glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glUseProgram(myProgram.program);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, texture);
        glUniform1i(glGetUniformLocation(myProgram.program, "myTexture"), 0);
        
        glm::mat4 model = glm::mat4(1.0f);
        glm::mat4 view = glm::mat4(1.0f);
        glm::mat4 projection = glm::mat4(1.0f);
        
        GLint myModelLoc = glGetUniformLocation(myProgram.program,"myModel");
        GLint myViewLoc = glGetUniformLocation(myProgram.program,"myView");
        GLint myProjectionLoc = glGetUniformLocation(myProgram.program,"myProjection");
        
        projection = glm::perspective(glm::radians(fov), 1.0f, 0.01f, 100.f); // projection matrix: fov is in degrees, perspective expects radians
        glUniformMatrix4fv(myProjectionLoc, 1, GL_FALSE, glm::value_ptr(projection));
        glUniformMatrix4fv(myModelLoc, 1, GL_FALSE, glm::value_ptr(model));

        
        glBindVertexArray(VAO);
        // cameraCenter holds the camera front direction here (set in mouse_callback),
        // so the point being looked at is the camera position plus that direction
        view = glm::lookAt(cameraEye,                // camera position
                           cameraEye + cameraCenter, // target = position + front direction
                           cameraUp);                // up vector
                    
        glUniformMatrix4fv(myViewLoc, 1, GL_FALSE, glm::value_ptr(view));
        glDrawArrays(GL_TRIANGLES, 0, squareIndicesCount);
        glBindVertexArray(0);

        glfwSwapBuffers(window);
    }

    // Clean up
    glfwTerminate();
    
    return 1;
}

Result:

(Figure: the camera driven by keyboard and mouse input)

Summary:

Here we added keyboard and mouse input listeners by calling:
glfwSetKeyCallback(window, key_callback);
glfwSetCursorPosCallback(window, mouse_callback);
glfwSetScrollCallback(window, scroll_callback);
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);

These callbacks update the global camera variables, which change the view matrix and the projection matrix and produce the movement effect.



Finally, for the matrix math involved here (dot product, cross product, normalization, and so on), see the following posts:

矩阵和向量的乘法---点乘、叉乘、内积、外积、数乘、哈达玛积、克罗内克积
向量点乘叉乘、矩阵、OpenGL变化


