Unity URP raytrace volumetric light (god ray) effect

linshaochuan 2021-06-24 17:37

I tried implementing volumetric light in Unity with a raytrace (ray-marching) approach; the result at runtime is shown below.

Raytracing like this is expensive; with the sample count raised to 200 it is almost too slow to run.

First, the scene is rendered from the light's position (Unity already does this for the main light, with a configurable number of cascades).

The basic idea: in a full-screen pass, reconstruct each pixel's world position from the scene depth.
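
As a side note, a minimal sketch of that reconstruction is below. The uniform inv_view_proj is an assumption for illustration; the actual shader further down splits it into _mtx_proj_inv and _mtx_view_inv and re-applies the linear eye depth as the homogeneous factor instead.

// Minimal sketch: reconstruct world position from the depth buffer.
// inv_view_proj is an assumed uniform (inverse of view * projection).
float4x4 inv_view_proj;

float3 world_pos_from_depth(float2 uv, float raw_depth)
{
    // screen uv -> NDC; depending on the graphics API the depth value and
    // the y direction may need remapping
    float4 ndc = float4(uv * 2.0 - 1.0, raw_depth, 1.0);
    float4 world = mul(inv_view_proj, ndc);
    return world.xyz / world.w; // undo the perspective divide
}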

From the world position, determine which cascade culling sphere the point falls into

(this step is unnecessary without cascaded shadows, e.g. for a spotlight).
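
For reference, cascade selection just tests which cascade's culling sphere contains the point. A rough, illustrative sketch follows; the centers and radii here are hypothetical parameters, while URP itself stores them in the _CascadeShadowSplitSpheres* uniforms and does the test branchlessly in ComputeCascadeIndex (Shadows.hlsl), which TransformWorldToShadowCoord calls for you.

// Illustrative only: pick the first cascade whose culling sphere contains the point.
int pick_cascade(float3 positionWS, float3 centers[4], float radii[4])
{
    for (int c = 0; c < 4; c++)
    {
        float3 d = positionWS - centers[c];
        if (dot(d, d) < radii[c] * radii[c])
            return c; // first (smallest) sphere containing the point wins
    }
    return 3; // fall back to the last cascade
}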

Then transform the world position into the light's view space (light_view_pos),

and use the light's projection matrix to map it onto the light's depth texture.

Compare this point's depth against the depth recorded by the light camera to decide whether it is occluded (i.e. in shadow).

The principle is the same as standard shadow mapping.
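
Written out by hand, the per-sample test looks roughly like the sketch below. The matrix and texture names are assumed here purely for illustration; the shader that follows lets URP do all of this (including the cascade pick) through TransformWorldToShadowCoord and GetMainLight.

// Hand-written version of the depth comparison described above (illustrative).
float4x4 _light_view_mtx;             // assumed: world -> light view
float4x4 _light_proj_mtx;             // assumed: light view -> light clip
Texture2D<float> _light_depth_tex;    // assumed: the light camera's depth map
SamplerState sampler_light_depth_tex;

float manual_shadow_test(float3 world_pos)
{
    // world -> light view -> light clip space
    float4 light_view_pos = mul(_light_view_mtx, float4(world_pos, 1.0));
    float4 light_clip_pos = mul(_light_proj_mtx, light_view_pos);
    float3 light_ndc = light_clip_pos.xyz / light_clip_pos.w;

    // NDC xy -> shadow map uv
    float2 shadow_uv = light_ndc.xy * 0.5 + 0.5;

    // closest occluder depth recorded by the light camera
    float stored_depth = _light_depth_tex.Sample(sampler_light_depth_tex, shadow_uv);

    // an occluder closer to the light than this point means shadow
    // (assuming a reversed-Z depth buffer, where "closer" is the larger value)
    return light_ndc.z >= stored_depth ? 1.0 : 0.0; // 1 = lit, 0 = shadowed
}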

The shader code is attached below:

Shader "lsc/RaytraceShader"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _raytrace_step_count("rayrace step count", Int) = 5
        _scale("scale", float) = 1.0
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #pragma multi_compile _ _MAIN_LIGHT_SHADOWS
            #pragma multi_compile _ _MAIN_LIGHT_SHADOWS_CASCADE
            #pragma multi_compile_fragment _ _ADDITIONAL_LIGHT_SHADOWS
            #pragma multi_compile_fragment _ _SHADOWS_SOFT

            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
                float4 screen_pos : TEXCOORD1;
            };

            float4x4 _mtx_view_inv;
            float4x4 _mtx_proj_inv;
            TEXTURE2D_X_FLOAT(_CameraDepthTexture);
            SAMPLER(sampler_CameraDepthTexture);

            v2f vert (appdata v)
            {
                v2f o;

                VertexPositionInputs vertexInput = GetVertexPositionInputs(v.vertex.xyz);
                o.vertex = vertexInput.positionCS;
                o.screen_pos = ComputeScreenPos(o.vertex);

                o.uv = v.uv;

                return o;
            }

            sampler2D _MainTex;
            int _raytrace_step_count;
            float _scale;

            float4 cal_world_pos_by_dep(float ndc_dep, float2 screen_space, out float4 view_pos)
            {
                // convert the non-linear depth-buffer value to linear eye (view-space) depth
                float linearDepthZ = LinearEyeDepth(ndc_dep, _ZBufferParams);
                // screen uv -> NDC xy in [-1, 1]
                float4 ndc_pos;
                ndc_pos.xy = screen_space * 2.0 - 1.0;
                ndc_pos.zw = float2(ndc_dep, 1);
                // re-apply the homogeneous factor: clip = ndc * w, and for Unity's
                // perspective projection w equals the linear eye depth
                ndc_pos = ndc_pos * linearDepthZ;
                // back to view space, then to world space
                view_pos = mul(_mtx_proj_inv, ndc_pos);
                float4 world_pos = mul(_mtx_view_inv, float4(view_pos.xyz, 1));

                return world_pos;
            }


            float4 frag (v2f i) : SV_Target
            {
                float4 col = tex2D(_MainTex, i.uv);

                // perspective-divide the interpolated screen position
                float2 screen_space = i.screen_pos.xy / i.screen_pos.w;
                // fetch the non-linear depth from the camera depth texture
                float org_depth = SAMPLE_TEXTURE2D_X(_CameraDepthTexture, sampler_CameraDepthTexture, screen_space).x;
                // reconstruct the world (and view) position from the depth
                float4 view_pos;
                float4 world_pos = cal_world_pos_by_dep(org_depth, screen_space, view_pos);

                float3 cam_wpos = GetCameraPositionWS();
                float3 v_step = (world_pos.xyz - cam_wpos) / _raytrace_step_count;

                float3 rt_start = cam_wpos;
                float shadow_atten = 0;
                UNITY_LOOP
                for (int i = 0; i < _raytrace_step_count; i++)//循环,超级低效
                {
                    float4 shadow_coord = TransformWorldToShadowCoord(rt_start);
                    rt_start += v_step;

                    Light mainLight = GetMainLight(shadow_coord); // URP samples the (cascaded) shadow map here
                    shadow_atten += mainLight.shadowAttenuation;
                }

                shadow_atten = (shadow_atten / _raytrace_step_count) * _scale;

                col.rgb = col.rgb * shadow_atten;

                return col;
            }
            ENDHLSL
        }
    }
}
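
Note that this pass only modulates the existing scene colour by the averaged shadowAttenuation along the ray (times _scale), so the shafts come from the contrast between lit and shadowed ray segments; there is no explicit in-scattering or fog density term.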

The corresponding URP pipeline C# code:

using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;
using System;

public class RayTraceFogRenderPassFeature : ScriptableRendererFeature
{
    class CustomRenderPass : ScriptableRenderPass
    {
        public Material raytrace_material_;
        public RenderTargetIdentifier render_target_color_;
        public RenderTargetHandle temp_render_target_;
        public int raytrace_count_ = 5;
        public float scale_ = 1.0f;


        // This method is called before executing the render pass.
        // It can be used to configure render targets and their clear state. Also to create temporary render target textures.
        // When empty this render pass will render to the active camera render target.
        // You should never call CommandBuffer.SetRenderTarget. Instead call <c>ConfigureTarget</c> and <c>ConfigureClear</c>.
        // The render pipeline will ensure target setup and clearing happens in a performant manner.
        public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
        {
        }

        // Here you can implement the rendering logic.
        // Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers
        // https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
        // You don't have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
        public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
        {
            if (!raytrace_material_)
                return;

            raytrace_material_.SetInt("_raytrace_step_count", raytrace_count_);
            raytrace_material_.SetFloat("_scale", scale_);

            {
                Camera cam = renderingData.cameraData.camera;
                var mtx_view_inv = cam.worldToCameraMatrix.inverse;
                var mtx_proj_inv = cam.projectionMatrix.inverse;

                raytrace_material_.SetMatrix("_mtx_view_inv", mtx_view_inv);
                raytrace_material_.SetMatrix("_mtx_proj_inv", mtx_proj_inv);
            }

            const string CommandBufferTag = "raytrace fog Pass";
            var cmd = CommandBufferPool.Get(CommandBufferTag);

            RenderTextureDescriptor opaqueDesc = renderingData.cameraData.cameraTargetDescriptor;
            opaqueDesc.depthBufferBits = 0;
            cmd.GetTemporaryRT(temp_render_target_.id, opaqueDesc);

            // Blit through the material, writing the result into the temporary RT
            cmd.Blit(render_target_color_, temp_render_target_.Identifier(), raytrace_material_);
            // then copy the temporary RT back to the camera color target
            cmd.Blit(temp_render_target_.Identifier(), render_target_color_);

            // Release the temporary RT (recorded into the command buffer)
            cmd.ReleaseTemporaryRT(temp_render_target_.id);
            // Execute the command buffer
            context.ExecuteCommandBuffer(cmd);
            // Return the command buffer to the pool
            CommandBufferPool.Release(cmd);
        }

        // Cleanup any allocated resources that were created during the execution of this render pass.
        public override void OnCameraCleanup(CommandBuffer cmd)
        {
        }
    }

    CustomRenderPass m_ScriptablePass;
    public Material raytrace_material_;
    public int raytrace_count_ = 5;
    public float scale_ = 1.0f;

    /// <inheritdoc/>
    public override void Create()
    {
        m_ScriptablePass = new CustomRenderPass();
        // Initialize the temporary RT handle with a shader property name
        // (the name "_RayTraceFogTempRT" is arbitrary; any unused name works).
        m_ScriptablePass.temp_render_target_.Init("_RayTraceFogTempRT");

        // Configures where the render pass should be injected.
        m_ScriptablePass.renderPassEvent = RenderPassEvent.AfterRenderingOpaques;
    }

    // Here you can inject one or multiple render passes in the renderer.
    // This method is called when setting up the renderer once per-camera.
    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        m_ScriptablePass.render_target_color_ = renderer.cameraColorTarget;
        m_ScriptablePass.raytrace_material_ = raytrace_material_;
        m_ScriptablePass.raytrace_count_ = raytrace_count_;
        m_ScriptablePass.scale_ = scale_;

        renderer.EnqueuePass(m_ScriptablePass);
    }
}
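
To use it, add RayTraceFogRenderPassFeature to the Renderer Features list of the URP Renderer asset, assign a material that uses the lsc/RaytraceShader shader above, and make sure Depth Texture is enabled on the URP asset so that _CameraDepthTexture is available to the pass.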

