Unity Shader: Edge Detection

winsons 2020-07-09 14:01

Edge Detection

There are two ways to do edge detection:
Color difference detection: use the colors of the pixels surrounding a pixel to decide whether that pixel lies on an edge.
Depth-normal detection: compare the view-space depth and normal of neighboring pixels to decide whether the current pixel lies on an edge.

Color Difference Detection

Let's first recall how an edge is formed. If neighboring pixels differ noticeably in color, brightness, texture or other attributes, we perceive a boundary between them. The difference between neighboring pixels can be expressed as a gradient, and as you would expect, the gradient magnitude is large at edges. Based on this idea, several edge-detection operators have been proposed: the Roberts operator, the Prewitt operator, and the Sobel operator. Each consists of a pair of kernels, one for each gradient direction.
Roberts operator:
\(\begin{bmatrix} -1 & 0 \\ 0 & 1 \end{bmatrix} \quad \begin{bmatrix} 0 & -1 \\ 1 & 0 \end{bmatrix}\)

Prewitt operator:
\(\begin{bmatrix} -1 & -1 & -1 \\ 0 & 0 & 0 \\ 1 & 1 & 1 \end{bmatrix} \quad \begin{bmatrix} -1 & 0 & 1 \\ -1 & 0 & 1 \\ -1 & 0 & 1 \end{bmatrix}\)

Sobel operator:
\(\begin{bmatrix} -1 & -2 & -1 \\ 0 & 0 & 0 \\ 1 & 2 & 1 \end{bmatrix} \quad \begin{bmatrix} -1 & 0 & 1 \\ -2 & 0 & 2 \\ -1 & 0 & 1 \end{bmatrix}\)

How color-difference edge detection works: convolve the image with a kernel pair, usually the Sobel operator. Center the kernel on the pixel and convolve once, then convolve again with the transposed kernel; this is equivalent to computing the horizontal gradient Gx and the vertical gradient Gy. The two are then combined into an overall gradient magnitude G:
\(G = \sqrt{G_x^2 + G_y^2}\)
In a shader, however, the square root is relatively expensive, so it can be replaced with a sum of absolute values:
\(G \approx |G_x| + |G_y|\)
Pixels with a large gradient value G are then treated as edges.
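As a quick worked example (an illustration, not from the original post), apply the Sobel pair above to a 3×3 luminance patch whose right column is brighter than the rest:
\(\begin{bmatrix} 0.2 & 0.2 & 0.8 \\ 0.2 & 0.2 & 0.8 \\ 0.2 & 0.2 & 0.8 \end{bmatrix}\)
Multiplying element-wise and summing, the first kernel gives \(G_x = 0\) (all three rows are identical) and the second gives \(G_y = (0.8 - 0.2)\cdot(1 + 2 + 1) = 2.4\), so \(G = \sqrt{0^2 + 2.4^2} = 2.4\), and the cheaper \(|G_x| + |G_y|\) gives the same 2.4. The shader below works with \(1 - G\), so this patch reads as a strong edge, while a uniform patch gives \(G = 0\) and \(1 - G = 1\).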
Below is a shader that uses the Sobel operator:

//C#
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class WSPostEffect : MonoBehaviour
{
    public Material material = null;   // material created from the edge-detection shader below
    public bool IsEdgeOnly;            // show only the edge lines over a solid background
    public Color edgeColor;            // color of the edge lines
    public Color backgroundColor;      // background color used in edge-only mode
    public float edgeFactor;           // set below as _EdgeFactor, but not declared by the shader
    // Runs after the camera finishes rendering; blits the screen image through the material.
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material == null)
            Graphics.Blit(source, destination);
        else
        {
            material.SetFloat("_EdgeOnly", IsEdgeOnly ? 1.0f : 0.0f);
            material.SetColor("_EdgeColor", edgeColor);
            material.SetColor("_BackgroundColor", backgroundColor);
            material.SetFloat("_EdgeFactor", edgeFactor);
            Graphics.Blit(source, destination, material, -1);
        }
    }
}

//Shader
Shader "WS/EdgeDetect"
{
	Properties
	{
		_MainTex ("Texture", 2D) = "white" {}
	}
	SubShader
	{
		Tags { "RenderType"="Transparent" "Queue"="Transparent" }
		LOD 100

		Pass
		{
			ZTest Always
			ZWrite Off
			Cull Off
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag
			
			#include "UnityCG.cginc"

			struct appdata
			{
				float4 vertex : POSITION;
				float2 uv : TEXCOORD0;
			};

			struct v2f
			{
				// UV coordinates of the pixel and its 8 neighbours (9 samples in total)
				float2 uv[9] : TEXCOORD0;
				float4 vertex : SV_POSITION;
			};

			sampler2D _MainTex;
			float4 _MainTex_ST;
			half4 _MainTex_TexelSize;
			// show only the edge lines (1) instead of blending them over the image (0)
			fixed _EdgeOnly;
			// color of the edge lines
			fixed4 _EdgeColor;
			// background color used in edge-only mode
			fixed4 _BackgroundColor;
			

			
			v2f vert (appdata v)
			{
				v2f o;
				o.vertex = UnityObjectToClipPos(v.vertex);
				half2 uv = v.uv;
				// Compute the 9 UV coordinates of the pixel and its neighbours here in the
				// vertex shader; interpolation is linear, so this reduces per-fragment work.
				o.uv[0] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
				o.uv[1] = uv + _MainTex_TexelSize.xy * half2(0, -1);
				o.uv[2] = uv + _MainTex_TexelSize.xy * half2(1, -1);
				o.uv[3] = uv + _MainTex_TexelSize.xy * half2(-1, 0);
				o.uv[4] = uv + _MainTex_TexelSize.xy * half2(0, 0);
				o.uv[5] = uv + _MainTex_TexelSize.xy * half2(1, 0);
				o.uv[6] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
				o.uv[7] = uv + _MainTex_TexelSize.xy * half2(0, 1);
				o.uv[8] = uv + _MainTex_TexelSize.xy * half2(1, 1);
				return o;
			}

			// compute the luminance of a color
			fixed luminance(fixed4 color)
			{
				return 0.299 * color.r + 0.587 * color.g + 0.114 * color.b;
			}

			half Sobel(v2f v)
			{
				// the Sobel kernel and its transposed counterpart
				const half Gx[9] = {-1, -2, -1,
				                     0,  0,  0,
				                     1,  2,  1};
				const half Gy[9] = {-1,  0,  1,
				                    -2,  0,  2,
				                    -1,  0,  1};
				half texColor;
				// horizontal gradient
				half edgeX = 0;
				// vertical gradient
				half edgeY = 0;
				for (int i = 0; i < 9; ++i)
				{
					texColor = luminance(tex2D(_MainTex, v.uv[i]));
					edgeX += texColor * Gx[i];
					edgeY += texColor * Gy[i];
				}
				// the smaller this value, the more edge-like the pixel is
				half edge = 1 - sqrt(edgeX * edgeX + edgeY * edgeY);
				// cheaper approximation when performance matters:
				//half edge = 1 - abs(edgeX) - abs(edgeY);
				return edge;
			}
			
			fixed4 frag (v2f i) : SV_Target
			{
				half edge = Sobel(i);
				// edge-only output: edge color where edge is small, background color elsewhere
				fixed4 edgeOnlyColor = lerp(_EdgeColor, _BackgroundColor, edge);
				// blended output: pixels with edge < 0.3 get the edge color, the rest keep the scene color
				fixed4 edgeWithColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[4]), step(0.3, edge));
				return lerp(edgeWithColor, edgeOnlyColor, _EdgeOnly);
			}
			ENDCG
		}
	}
}
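For reference, the same Sobel computation can be written on the CPU. The sketch below is an illustration only (the class name SobelReference is made up); it assumes a readable Texture2D (Read/Write enabled) and returns the same edge value as the shader's Sobel() function, using the abs() approximation.

//C#
using UnityEngine;

public static class SobelReference
{
    // Same weights as the luminance() function in the shader above.
    static float Luminance(Color c)
    {
        return 0.299f * c.r + 0.587f * c.g + 0.114f * c.b;
    }

    // Returns the shader's "edge" value at pixel (x, y): smaller means more edge-like.
    public static float Edge(Texture2D tex, int x, int y)
    {
        float[] gx = { -1, -2, -1, 0, 0, 0, 1, 2, 1 };
        float[] gy = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };

        float edgeX = 0f, edgeY = 0f;
        int k = 0;
        for (int dy = -1; dy <= 1; dy++)
        {
            for (int dx = -1; dx <= 1; dx++, k++)
            {
                float lum = Luminance(tex.GetPixel(x + dx, y + dy));
                edgeX += lum * gx[k];
                edgeY += lum * gy[k];
            }
        }
        // abs() approximation of sqrt(edgeX^2 + edgeY^2), as noted in the shader
        return 1f - (Mathf.Abs(edgeX) + Mathf.Abs(edgeY));
    }
}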

Result: before/after screenshots.

Depth-Normal Detection

How it works: compare the depth and normal differences between the two pairs of samples lying on the diagonals around the current pixel. If either the depth difference or the normal difference exceeds a threshold, the pixel is judged to be on an edge; it is enough for one of the diagonal pairs to show a difference. In effect, this is the Roberts operator applied to the depth-normals texture.
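Written out, this is exactly the CheckSame test in the shader below. For a diagonal pair of samples \(s_1, s_2\), with \(n\) the encoded normal stored in xy and \(d\) the depth decoded from zw:
\(\text{same}(s_1, s_2) = \big[\,|n_{1x} - n_{2x}| + |n_{1y} - n_{2y}| < 0.1\,\big] \cdot \big[\,|d_1 - d_2| < 0.1\, d_1\,\big]\)
\(\text{edge} = \text{same}(s_{(1,1)}, s_{(-1,-1)}) \cdot \text{same}(s_{(-1,1)}, s_{(1,-1)})\)
The result is 0 when either diagonal pair differs (the pixel is on an edge) and 1 otherwise.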
A simple implementation:

//C#
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class WSPostEffect : MonoBehaviour
{
    private Camera cam;
    public Material material = null;
    public bool IsEdgeOnly;
    public Color edgeColor;
    public Color backgroundColor;   // used by the shader's edge-only mode

    private void Awake()
    {
        cam = this.gameObject.GetComponent<Camera>();
        // ask the camera to render the depth + normals texture the shader samples
        cam.depthTextureMode |= DepthTextureMode.DepthNormals;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (material == null)
            Graphics.Blit(source, destination);
        else
        {
            material.SetFloat("_EdgeOnly", IsEdgeOnly ? 1.0f : 0.0f);
            material.SetColor("_EdgeColor", edgeColor);
            Graphics.Blit(source, destination, material, -1);
        }
    }
}

//Shader
Shader "WS/WS_EdgeDetectDepthNormal"
{
	Properties
	{
		_MainTex ("Texture", 2D) = "white" {}
	}
	SubShader
	{
		Tags { "RenderType"="Transparent" "Queue"="Transparent" }

		Pass
		{
			ZTest Always
			ZWrite Off
			Cull Off
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag
			
			#include "UnityCG.cginc"

			struct appdata
			{
				float4 vertex : POSITION;
				float2 uv : TEXCOORD0;
			};

			struct v2f
			{
				float2 uv[5] : TEXCOORD0;
				float4 vertex : SV_POSITION;
			};

			sampler2D _MainTex;
			half4 _MainTex_TexelSize;
			fixed _EdgeOnly;
			fixed4 _EdgeColor;
			fixed4 _BackgroundColor;
			// depth + normals texture rendered by the camera (see the C# script above)
			sampler2D _CameraDepthNormalsTexture;
			
			
			v2f vert (appdata v)
			{
				v2f o;
				half2 uv = v.uv;
				o.vertex = UnityObjectToClipPos(v.vertex);
				o.uv[0] = uv;
				// on platforms where the source image is flipped vertically, flip the
				// sampling UVs used for the depth-normals texture to match
				#if UNITY_UV_STARTS_AT_TOP
					if (_MainTex_TexelSize.y < 0)
						uv.y = 1 - uv.y;
				#endif
				// the four diagonal neighbours compared in the fragment shader
				o.uv[1] = uv + _MainTex_TexelSize.xy * half2(1, 1);
				o.uv[2] = uv + _MainTex_TexelSize.xy * half2(-1, 1);
				o.uv[3] = uv + _MainTex_TexelSize.xy * half2(1, -1);
				o.uv[4] = uv + _MainTex_TexelSize.xy * half2(-1, -1);
				return o;
			}

			float CheckSame(half4 sample1, half4 sample2)
			{
				// sample.xy is not the actual normal, but the encoded values can be compared
				// directly to measure how different two normals are
				half2 sample1_normal = sample1.xy;
				float sample1_depth = DecodeFloatRG(sample1.zw);
				half2 sample2_normal = sample2.xy;
				float sample2_depth = DecodeFloatRG(sample2.zw);
				// normal difference
				half2 diff_normal = abs(sample1_normal - sample2_normal);
				int is_same_normal = (diff_normal.x + diff_normal.y) < 0.1;
				// depth difference
				float diff_depth = abs(sample1_depth - sample2_depth);
				// Compare depth relative to sample1: when sample1 is far away, small absolute
				// differences should be ignored; when it is close, a small difference already
				// counts as an edge. The threshold therefore scales with sample1's depth,
				// using 0.1 as the factor.
				int is_same_depth = diff_depth < 0.1 * sample1_depth;
				// 1 = the two samples are considered the same (no edge between them)
				return is_same_normal * is_same_depth ? 1.0 : 0.0;
			}
			
			fixed4 frag (v2f i) : SV_Target
			{
				half4 sample1 = tex2D(_CameraDepthNormalsTexture, i.uv[1]);
				half4 sample2 = tex2D(_CameraDepthNormalsTexture, i.uv[2]);
				half4 sample3 = tex2D(_CameraDepthNormalsTexture, i.uv[3]);
				half4 sample4 = tex2D(_CameraDepthNormalsTexture, i.uv[4]);

				// 1 = not an edge; a difference on either diagonal pair drives this to 0
				half edge = 1.0;
				edge *= CheckSame(sample1, sample4);
				edge *= CheckSame(sample2, sample3);

				fixed4 edgeWithColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[0]), edge);
				fixed4 edgeOnlyColor = lerp(_EdgeColor, _BackgroundColor, edge);

				return lerp(edgeWithColor, edgeOnlyColor, _EdgeOnly);
			}
			ENDCG
		}
	}
}

Result: before/after screenshots, plus an edges-only view.
