c# - 在片段着色器中计算二维高斯滤波器
问题描述
我想计算二维高斯函数:输入是 X、Y 纹理 UV 坐标,输出对应的高斯值。
我遇到的困难是:如何根据纹素(texel)的 UV 坐标得到正确的高斯权重。
// Un-normalized 2D Gaussian: exp(-(x^2 + y^2) / (2 * sigma^2)).
// x, y: offsets from the kernel center (the caller currently feeds raw UVs).
// Reads the shader property _2D_StandardDeviation as sigma.
float Gaussian2D(float x, float y)
{
    float x_y_squared = x * x + y * y;
    // 2 * sigma^2 — denominator of the Gaussian exponent
    float stDevSquared = 2 * _2D_StandardDeviation * _2D_StandardDeviation;
    float div = x_y_squared / stDevSquared;
    // exp() is the base-e intrinsic; clearer and cheaper than pow(E, -div)
    float gauss = exp(-div);
    return gauss;
}
// Normalized 1D Gaussian weight for an integer tap offset:
// (1 / sqrt(2*pi*sigma^2)) * exp(-offset^2 / (2*sigma^2)).
// Reads the shader property _StandardDeviation as sigma.
float Gaussian(int offset)
{
    float stDevSquared = _StandardDeviation * _StandardDeviation;
    // exp() replaces pow(E, x): the dedicated base-e intrinsic
    float gauss = (1 / sqrt(2 * PI * stDevSquared)) * exp(-((offset * offset) / (2 * stDevSquared)));
    return gauss;
}
// Fragment shader from the question: blends the current sample with 5
// history samples using 1D Gaussian weights, gated by a change threshold.
// NOTE(review): Gaussian2D(i.uv.x, i.uv.y) yields ONE scalar that multiplies
// every term, so it factors out of the sum and merely scales the whole
// result — it does not weight individual texels. This is likely the actual
// problem the asker is describing.
fixed4 frag(v2f i) : SV_Target
{
// current frame sample (red channel)
fixed source = tex2D(_MainTex, i.uv).r;
// temporal Gaussian weights for offsets 0..5
float g0 = Gaussian(0);
float g1 = Gaussian(1);
float g2 = Gaussian(2);
float g3 = Gaussian(3);
float g4 = Gaussian(4);
float g5 = Gaussian(5);
// normalization term (sum of the weights only)
float omega = g0 + g1 + g2 + g3 + g4 + g5;
// 2D Gaussian evaluated at the UV coordinate itself (constant per texel)
float gauss = Gaussian2D(i.uv.x, i.uv.y);
// history samples, oldest presumably _HistoryE — TODO confirm ordering
fixed prev_a = tex2D(_HistoryA, i.uv).r;
fixed prev_b = tex2D(_HistoryB, i.uv).r;
fixed prev_c = tex2D(_HistoryC, i.uv).r;
fixed prev_d = tex2D(_HistoryD, i.uv).r;
fixed prev_e = tex2D(_HistoryE, i.uv).r;
// weighted temporal average; `gauss` factors out of every term (see NOTE)
fixed current = (gauss*source * g0 + gauss*prev_a * g1 + gauss*prev_b * g2 + gauss*prev_c * g3 + gauss*prev_d * g4 + gauss*prev_e * g5)/(omega);
// signed change vs previous frame — note: NOT abs(), so any negative
// change also passes the threshold test below
float diff = source - prev_a;
if (diff <= _dataDelta)
{
// small (or negative) change: output the temporally blended value
return current;
}
// large change: keep the raw sample to avoid ghosting
return source;
}
ENDCG
}
更新:下面是基于 Spektre 出色解答改写的版本。
// --- shader inputs / state ------------------------------------------------
sampler2D _MainTex;        // newest frame (red channel carries the depth)
sampler2D _HistoryA;       // history frames — ordering assumed newest->oldest; verify against caller
sampler2D _HistoryB;
sampler2D _HistoryC;
sampler2D _HistoryD;
float4 _MainTex_TexelSize; // set by Unity: xy = 1/size, zw = size
float _dataDelta;          // depth-change threshold for the temporal gate
float _blurRadius;         // spatial blur radius
float _stepsDelta;         // UV step fed to the spatial blur
float _resolution;         // texture resolution used by the spatial blur
float4 _MainTex_ST;        // tiling/offset, set by Unity
float _StandardDeviation;  // sigma of the temporal Gaussian
#define E 2.71828182846    // Euler's number (base of the natural log)
#define PI 3.14159265359
// Pass-through vertex shader: transform to clip space, forward UVs unchanged.
v2f vert(appdata v) {
    v2f o;
    o.uv = v.uv;                                // UVs forwarded as-is
    o.vertex = UnityObjectToClipPos(v.vertex);  // object space -> clip space
    return o;
}
// Normalized 1D Gaussian weight for an integer tap offset:
// (1 / sqrt(2*pi*sigma^2)) * exp(-offset^2 / (2*sigma^2)).
// Reads the shader property _StandardDeviation as sigma.
float Gaussian(int offset)
{
    float stDevSquared = _StandardDeviation * _StandardDeviation;
    // exp() replaces pow(E, x): the dedicated base-e intrinsic
    float gauss = (1 / sqrt(2 * PI * stDevSquared)) * exp(-((offset * offset) / (2 * stDevSquared)));
    return gauss;
}
// 9-tap Gaussian blur along the direction given by (hstep, vstep).
// NOTE: despite the name, it blurs along whatever direction the caller
// encodes in the step arguments (horizontal, vertical, or diagonal).
float blur2d_horizontal(sampler2D tex, v2f i, float hstep, float vstep) {
    // Fixed kernel weights, indexed by |tap offset| (0..4).
    const float weight[5] = {
        0.2270270270, 0.1945945946, 0.1216216216, 0.0540540541, 0.0162162162
    };
    float2 tc = i.uv;
    // blur radius in pixels
    float blur = _blurRadius / _resolution / 4;
    float2 dir = float2(hstep, vstep);
    float sum = 0;
    // taps from -4 to +4 around the center texel, same order as before
    for (int k = -4; k <= 4; k++)
        sum += tex2D(tex, tc + float(k) * blur * dir).r * weight[abs(k)];
    return sum;
}
// Fragment shader: depth denoising (Unity port of Spektre's GLSL answer).
// If the depth change between the two oldest frames is below _dataDelta,
// spatially blur the oldest frame, then blend up to m frames with
// normalized 1D Gaussian weights; otherwise pass the raw sample through.
fixed4 frag(v2f i) : SV_Target {
    const int m = 5;                 // number of frames blended temporally
    float z[m];                      // per-frame depth samples
    float gauss_curve[m];            // temporal Gaussian weights
    float zed;                       // resulting depth
    _resolution = 900;               // NOTE(review): shadows the uniform; better set from C#
    z[0] = tex2D(_MainTex, i.uv).r;  // oldest 2 frames
    z[1] = tex2D(_HistoryA, i.uv).r;
    if (abs(z[0] - z[1]) < _dataDelta) // threshold depth change
    {
        // 2D spatial gauss blur of z0
        z[0] = blur2d_horizontal(_MainTex, i, _stepsDelta, _stepsDelta);
        // fetch depths from up to m frames
        z[2] = tex2D(_HistoryB, i.uv).r;
        z[3] = tex2D(_HistoryC, i.uv).r;
        z[4] = tex2D(_HistoryD, i.uv).r;
        // temporal Gaussian weights and their sum (needed to normalize)
        float wsum = 0.0;
        for (int k = 0; k < m; k++)
        {
            gauss_curve[k] = Gaussian(k);
            wsum += gauss_curve[k];
        }
        // 1D temporal gauss blur, normalized so the weights sum to 1.
        // FIX: the original declared `sum` for this but never used it, so
        // the output was scaled down by the (sub-unity) weight total.
        zed = 0.0;
        for (int idx = 0; idx < m; idx++)
            zed += gauss_curve[idx] * z[idx];
        zed /= wsum;
    }
    else
        zed = z[0];
    return fixed4(zed, zed, zed, 0.0);
}
解决方案
好的,我想我设法做到了……不过要说明:题目给出的公式只是符号上的简化(这在 CV/DIP 中很常见),并不是完整的方程,也不是唯一确定的,因此它的解释(和实现)并不明确……但我设法把缺失的部分补全,组合成了下面这样的实现(GLSL):
//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
layout(location=0) in vec4 vertex;
out vec2 pos; // screen position <-1,+1>
// Full-screen pass: the quad vertices already sit in NDC, so no transform.
void main()
{
 gl_Position=vertex;  // pass the position straight through
 pos=vertex.xy;       // expose NDC xy in <-1,+1> to the fragment stage
}
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos; // screen position <-1,+1>
out vec4 gl_FragColor; // fragment output color
uniform sampler2D txr_rgb;
uniform sampler2D txr_zed0;
uniform sampler2D txr_zed1;
uniform sampler2D txr_zed2;
uniform sampler2D txr_zed3;
uniform sampler2D txr_zed4;
uniform float xs,ys; // texture resolution
uniform float r; // blur radius
//---------------------------------------------------------------------------
// Unused stub for a Gaussian weight function — always returns 0.0 and is
// never called from main(); likely left over from an earlier iteration.
float G(float t)
{
return 0.0;
}
//---------------------------------------------------------------------------
// Fragment shader: spatial + temporal depth smoothing.
// Where the two oldest depth frames differ by more than Th, the oldest
// frame is spatially blurred and up to m frames are blended temporally;
// otherwise the raw oldest sample passes through.
void main()
{
vec2 p;
vec4 rgb;
const int m=5;          // number of depth frames blended temporally
const float Th=0.0015;  // depth-change threshold
float z[m],zed;
p=0.5*(pos+1.0); // p = pos position in texture
rgb=texture2D(txr_rgb ,p); // rgb color (just for view)
z[0]=texture2D(txr_zed0,p).r; // oldest 2 frames
z[1]=texture2D(txr_zed1,p).r;
if (abs(z[0]-z[1])>Th) // threshold depth change
{
int i;
float x,y,xx,yy,rr,dx,dy,w,w0;
// 2D spatial gauss blur of z0
rr=r*r;
w0=0.3780/pow(r,1.975); // empirical normalization of the spatial kernel
z[0]=0.0;
// comma-packed loops: scan a (2r+1)x(2r+1) pixel window around p while
// stepping p directly in UV space; only the circular area (xx+yy<=rr)
// contributes to the weighted sum
for (dx=1.0/xs,x=-r,p.x=0.5+(pos.x*0.5)+(x*dx);x<=r;x++,p.x+=dx){ xx=x*x;
for (dy=1.0/ys,y=-r,p.y=0.5+(pos.y*0.5)+(y*dy);y<=r;y++,p.y+=dy){ yy=y*y;
if (xx+yy<=rr)
{
w=w0*exp((-xx-yy)/(2.0*rr));
z[0]+=texture2D(txr_zed0,p).r*w;
}}}
// fetch depths from up to m frames
z[2]=texture2D(txr_zed2,p).r;
z[3]=texture2D(txr_zed3,p).r;
z[4]=texture2D(txr_zed4,p).r;
// 1D temporal gauss blur
// NOTE(review): the exponent is positive, so the weight grows with i
// (later array slots weighted more); a decaying Gaussian would use
// exp(-0.5*i*i/(m*m)) — confirm intent before reusing
for (zed=0.0,i=1;i<=m;i++) zed+=exp(0.5*float(i*i)/float(m*m))*z[i-1];
zed/=2.506628274631000502415765284811*float(m); // sqrt(2*pi)*m normalization
}
else zed=z[0];
zed*=20.0; // debug view: emphasize depth so its color is visible
// gl_FragColor=rgb; // debug view: render RGB texture
gl_FragColor=vec4(zed,zed,zed,0.0); // render resulting depth texture
}
//---------------------------------------------------------------------------
我使用这个数据集进行测试,但它的深度分辨率不是很好……
改用 garlic_7_1 数据集后,我得到了如下结果(已强调深度显示):
时间方向的帧数 m 是硬编码的,空间模糊半径 r 是 uniform。深度帧通过 txr_zed0...txr_zed(m-1) 传入,最新一帧放在最旧的槽位。必须恰当地选择阈值 Th,算法才能选中正确的区域!!!
为了使其正常工作,您应该在应用此着色器之后用输出结果替换 txr_zed0(在 CPU 端完成,或者渲染到纹理后交换纹理 id……)。否则,空间高斯模糊将不会应用到较旧的帧上。
[编辑1]
这里是设置 Th=0.01 时的预览(在 if 分支内直接输出红色,而不是执行模糊):
如您所见,它选中了边缘……因此(仅用于挑选 Th 的)调试改动如下:
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos; // screen position <-1,+1>
out vec4 gl_FragColor; // fragment output color
uniform sampler2D txr_rgb;
uniform sampler2D txr_zed0;
uniform sampler2D txr_zed1;
uniform sampler2D txr_zed2;
uniform sampler2D txr_zed3;
uniform sampler2D txr_zed4;
uniform float xs,ys; // texture resolution
uniform float r; // blur radius
//---------------------------------------------------------------------------
// Unused stub for a Gaussian weight function — always returns 0.0 and is
// never called from main(); likely left over from an earlier iteration.
float G(float t)
{
return 0.0;
}
//---------------------------------------------------------------------------
// Debug variant of the depth-smoothing fragment shader: pixels whose depth
// change exceeds Th are painted solid red and the shader returns early,
// which visualizes the thresholded regions for tuning Th.
void main()
{
vec2 p;
vec4 rgb;
const int m=5;          // number of depth frames blended temporally
// const float Th=0.0015;
const float Th=0.01;    // raised threshold used for this preview
float z[m],zed;
p=0.5*(pos+1.0); // p = pos position in texture
rgb=texture2D(txr_rgb ,p); // rgb color (just for view)
z[0]=texture2D(txr_zed0,p).r; // oldest 2 frames
z[1]=texture2D(txr_zed1,p).r;
if (abs(z[0]-z[1])>Th) // threshold depth change
{
gl_FragColor=vec4(1.0,0.0,0.0,0.0); // debug output
return;
// NOTE: everything below in this branch is intentionally unreachable in
// this debug variant — the early return above paints the region red.
int i;
float x,y,xx,yy,rr,dx,dy,w,w0;
// 2D spatial gauss blur of z0
rr=r*r;
w0=0.3780/pow(r,1.975); // empirical normalization of the spatial kernel
z[0]=0.0;
// comma-packed loops: scan a (2r+1)x(2r+1) pixel window around p; only
// the circular area (xx+yy<=rr) contributes to the weighted sum
for (dx=1.0/xs,x=-r,p.x=0.5+(pos.x*0.5)+(x*dx);x<=r;x++,p.x+=dx){ xx=x*x;
for (dy=1.0/ys,y=-r,p.y=0.5+(pos.y*0.5)+(y*dy);y<=r;y++,p.y+=dy){ yy=y*y;
if (xx+yy<=rr)
{
w=w0*exp((-xx-yy)/(2.0*rr));
z[0]+=texture2D(txr_zed0,p).r*w;
}}}
// fetch depths from up to m frames
z[2]=texture2D(txr_zed2,p).r;
z[3]=texture2D(txr_zed3,p).r;
z[4]=texture2D(txr_zed4,p).r;
// 1D temporal gauss blur
w0=0.5/float(m*m);
// NOTE(review): positive exponent weights later frames more; a decaying
// Gaussian would be exp(-w0*i*i) — confirm intent before reusing
for (zed=0.0,i=1;i<=m;i++) zed+=exp(w0*float(i*i))*z[i-1];
zed/=2.506628274631000502415765284811*float(m); // sqrt(2*pi)*m normalization
}
else zed=z[0];
zed*=40.0; // debug view: emphasize depth so its color is visible
// gl_FragColor=rgb; // debug view: render RGB texture
gl_FragColor=vec4(zed,zed,zed,0.0); // render resulting depth texture
}
//---------------------------------------------------------------------------
推荐阅读
- svn - PyCharm 撤消添加到 VCS (svn)
- angular - 如何在 Angular 中使用带有 graphQL Apollo 测试控制器的 rxjs 弹珠测试
- ios - iOS:如何实现遮罩视图事件穿透?
- c - 自动填充数组
- spring-mvc - jsp页面如何在一个表中添加多个列表
- python - 立即打印 Celery for Python 中已编程任务的任务 ID
- uwp - 为什么不能在通用 Windows 平台中使用 ServiceWorker
- android - 我们如何隐藏下载管理器在通知中显示的取消按钮
- python - 如何通过 Python3.7 中的 Pandas 数据框验证 Excel 工作表中的特定单元格值
- svg - 在颤动中改变路径的高度和宽度